# Global knitr chunk options: show code, suppress package messages/warnings.
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE)
library(knitr)
library(rmarkdown)
library(tidyverse) # for data cleaning and visualization

library(psych) # factor analysis (fa, KMO, mardia, ...)
library(GPArotation) # for additional matrix rotation; may not be necessary for the current analysis
library(nFactors) # find the number of factors

options(max.print = 999999) # we will have large outputs

library(MVN) # multivariate normality test
library(mvoutlier) # multivariate outliers

# Mixed-effects modelling stack (used in later analyses)
library(lme4)
library(lmerTest)
library(emmeans)
library(MuMIn)
#emm_options(lmerTest.limit = 27378)


# Load the corpus of per-text linguistic-index counts.
# here::here() resolves the path relative to the project root.
btr_data <- here::here("dataset/xml_results_BTR58_2020-5-26.csv") %>%
  read_csv()


# Data cleaning
# Columns 1-6 are text metadata; everything from column 7 onward is a linguistic index.
btr_indices <- btr_data[,7:dim(btr_data)[2]] #create a data set only including the ling indices

# Drop indices that are zero for every text, then re-attach the metadata columns.
btr_selected_var <- as.data.frame(cbind(btr_data[,1:6], btr_indices[, colSums(btr_indices != 0) > 0])) #filter it and combine them again

btr_filtered <- btr_selected_var %>%
  filter(nwords > 50)  # NOTE(review): comment previously said "100 words" but the cut-off applied is > 50 — confirm the intended threshold

# Move nwords and wrd_length to the front of the index columns
# (nwords itself is not entered into the EFA).
btr_filtered <- as.data.frame(cbind(btr_filtered[,1:6], btr_filtered[,c("nwords","wrd_length")], btr_filtered[,9:dim(btr_filtered)[2]]))  #swapping nwoards and wrdlengths column (since we do not use nwords in EFA)

# NOTE(review): attach() is fragile (name masking, stale copies), but later
# calls such as MD.visualize(x = learning_environment, ...) resolve bare
# column names via the search path — do not remove without refactoring those calls.
attach(btr_filtered)

# Indices excluded a priori from the EFA (overlapping/derived measures):
verbs_delete <- c("modal_necessity", "intransitive_activity_phrasal_verb", "intransitive_occurence_phrasal_verb", "intransitive_aspectual_phrasal_verb", "copular_phrasal_verb", "transitive_activity_phrasal_verb", "transitive_mental_phrasal_verb", "transitive_communication_phrasal_verb")

adj_delete <- c("size_attributive_adj", "time_attributive_adj", "color_attributive_adj", "evaluative_attributive_adj", "relational_attributive_adj", "topical__attributive_adj", "attitudinal_adj", "likelihood_adj", "certainty_adj", "ability_willingness_adj", "personal_affect_adj", "ease_difficulty_adj")

adv_delete <- c("attitudinal_adverb", "likelihood_adverb", "nonfactive_adverb", "hedges_adverb")

others_delete <- c("wh_relative_obj_clause", "that_verb_clause_nonfactive", "that_verb_clause_attitudinal", "that_verb_clause_likelihood", "that_adjective_clause", "that_adjective_clause_attitudinal", "that_adjective_clause_likelihood", "that_noun_clause_attitudinal", "that_noun_clause_factive", "that_noun_clause_likelihood", "that_noun_clause_nonfactive", "to_clause_verb_to_speech_act", "to_clause_verb_probability", "to_clause_verb_cognition", "to_clause_adjective_certainty", "to_clause_adjective_ability_willingness", "to_clause_adjective_certainty", "to_clause_adjective_ease_difficulty", "to_clause_adjective_evaluative", "to_clause_adjective_personal_affect")

delete_<- c("infinitive_prop", "pp_all" )

# Remove every exclusion set from the filtered data.
btr_filtered <- btr_filtered[, !(names(btr_filtered) %in% verbs_delete)]
btr_filtered <- btr_filtered[, !(names(btr_filtered) %in% adj_delete)]
btr_filtered <- btr_filtered[, !(names(btr_filtered) %in% adv_delete)]
btr_filtered <- btr_filtered[, !(names(btr_filtered) %in% others_delete)]
btr_filtered <- btr_filtered[, !(names(btr_filtered) %in% delete_)]

head(btr_filtered)
# count the number of columns (metadata + retained indices)
n_col <- dim(btr_filtered)[2]


## Doublecheck if the number of factor is reflected
## Doublecheck if the number of factor is reflected
## MD.reduce: given a factor-analysis fit and cut-off values, decide for
## every linguistic index which factor (if any) it is retained on.
## Args:
##   fa_model            - psych::fa() result (uses $loadings and $communality)
##   cutoff_loading      - minimum |primary loading| to retain an index
##   cut_off_communality - minimum communality for an index to be considered
##                         at all (e.g. .15; Biber, 2006)
## Returns a data frame (rownames = index names) with columns:
##   factor_loaded - name of the primary factor, or NA if not retained
##   loading       - signed loading on that factor (NA if not retained)
##   communality   - the index's communality (always filled in)
MD.reduce <- function(fa_model, cutoff_loading, cut_off_communality) {
  pattern_mat <- as.data.frame.matrix(fa_model$loadings)
  comu_vec <- as.data.frame(fa_model$communality)
  n_items <- nrow(pattern_mat)

  # Result holder: all-NA columns, filled in per item below.
  Var_retained <- data.frame(factor_loaded = rep(NA, n_items),
                             loading = rep(NA, n_items),
                             communality = rep(NA, n_items),
                             row.names = rownames(pattern_mat))

  for (i in seq_len(n_items)) {
    Var_retained[i, 3] <- comu_vec[i, 1]  # communality is always recorded
    if (abs(comu_vec[i, 1]) < cut_off_communality) next  # fails communality criterion

    # The primary factor is the one with the largest |loading|; on an exact
    # tie the last factor wins, matching the original column-wise scan.
    abs_row <- abs(unlist(pattern_mat[i, ]))
    candidates <- which(abs_row == max(abs_row))
    f <- candidates[length(candidates)]

    if (abs_row[f] >= cutoff_loading) {
      Var_retained[i, 1] <- names(pattern_mat)[f]  # primary factor's name
      Var_retained[i, 2] <- pattern_mat[i, f]      # the actual (signed) loading
    }
  }
  return(Var_retained)
}


## MD.scores: compute Biber (2006) dimension scores. For each factor, the
## score of a text is the sum of z-scores of the indices loading positively
## on that factor minus the sum of z-scores of the negatively loading ones.
## Args:
##   fa_model          - fa() result (uses $loadings and $factors)
##   reduced_item_list - output of MD.reduce (rownames = index names)
##   data              - data frame: columns 1-6 metadata, the rest indices
## Returns a data frame of the 6 metadata columns plus one score column per factor.
MD.scores <- function(fa_model, reduced_item_list, data) {
  n_factors <- dim(fa_model$loadings)[2]
  scores <- matrix(NA, nrow = nrow(data), ncol = n_factors)
  colnames(scores) <- colnames(fa_model$loadings)

  for (j in seq_len(fa_model$factors)) {
    fac <- colnames(scores)[j]
    on_factor <- reduced_item_list$factor_loaded == fac
    # Names of indices retained on this factor, split by loading sign.
    pos_names <- rownames(reduced_item_list[which(on_factor & reduced_item_list$loading >= 0), ])
    neg_names <- rownames(reduced_item_list[which(on_factor & reduced_item_list$loading <= 0), ])
    # Standardize each index column and sum across columns per text.
    pos_part <- rowSums(scale(data[, names(data) %in% pos_names]))
    neg_part <- rowSums(scale(data[, names(data) %in% neg_names]))
    scores[, j] <- pos_part - neg_part
  }
  # Combine metadata and factor scores into the final data set.
  as.data.frame(cbind(data[, 1:6], scores))
}

## This prints variable in the factor and plot
MD.visualize <- function(fa_model, reduced_data, dataset, x, xorder = F, color, wrap =F, grid =F, xlab =F, ylab = F, legend = T) {
  n_factor <- fa_model$factors 
  factor_name <- sort(colnames(fa_model$loadings))
  score <- MD.scores(fa_model, reduced_data, dataset)
  xorder = tolower(x)
  if (xorder == 'nlengths'){
    xorder = reorder(unique(x), nchar(unique(x)))
  }
  
  for (f in 1:n_factor) {
    print(paged_table(reduced_data[which(reduced_data$factor_loaded == factor_name[f]),]))
    
    
    if (xlab !=F){
      
    }
    if (ylab !=F){
      
    }

    plot <- ggplot(score, aes(x = reorder(tolower(x), xorder),  y = get(factor_name[f]), color = color)) +
                geom_jitter(alpha = .2) +
                geom_violin() +
                geom_boxplot(width = .3, outlier.alpha = 0) +
                theme_bw() +
                theme(axis.text.x = element_text(angle = 30, hjust = 1)) +
                labs(y = paste("Dimension", f, sep = " "), x = paste(toupper(substring(deparse(substitute(x)), 1,1)), substring(deparse(substitute(x)), 2), sep="", collapse=" ") , color = deparse(substitute(color)) )
    
    
    if (wrap != FALSE) {
      plot <- plot + facet_wrap(wrap, scales = "free_x")
    }
    
    if (grid != FALSE) {
      plot <- plot + facet_grid(grid, scales = "free_x")
    }
    
    if (legend == F) {
      plot <- plot + theme(legend.position = "none")
    }

    
    print(plot)
    cat("\r\n\r\n")

      }
} 

1 Setting up

1.1 Defining Functions

  1. MD.reduce takes factor analysis results (a fit object) and reduces the linguistic indices according to minimum factor loadings and communalities (see Biber, 1988, 2006).
  2. MD.scores calculates factor scores as described in Biber (2006).
  3. MD.visualize plots the scores based on MD.scores and prints a list of the variables relevant to each factor alongside.

1.2 Dataset

# First rows of the raw data, then frequency tables of the two main corpus
# metadata variables (the `##` lines below are knitted output).
head(btr_data)
xtabs(~btr_data$discipline)
## btr_data$discipline
##           business          education        engineering         humanities 
##               1271                531                774                674 
##    natural_science              other service_encounters     social_science 
##                627                 30                 22                858
xtabs(~btr_data$text_type)
## btr_data$text_type
##   announcements_discussions      assignment_description 
##                         403                         390 
##   classroom_management_talk           course_management 
##                          65                          21 
##                course_packs       instructional_reading 
##                          27                         663 
##         instructional_video                         lab 
##                        2217                          17 
##                     lecture                office_hours 
##                         177                          11 
## other_institutional_writing                        quiz 
##                          37                         306 
##           service_encounter                      slides 
##                          22                         140 
##                 study_group                    syllabus 
##                          25                         179 
##                   textbooks 
##                          87

1.2.1 Checking the distribution of short files

# Histogram of word counts for texts shorter than 200 words, to see how many
# files fall near the length cut-off applied above.
ggplot(btr_data[which(btr_data$nwords < 200),], aes(x = nwords)) +
  geom_histogram()

The final set ready to be analyzed.

paged_table(btr_filtered)

2 Multidimensional analysis (MDA)

2.1 Preliminary analysis

2.1.1 Distributions

# One histogram per linguistic index (columns 8..n_col) with a density overlay
# and two sets of outlier boundaries: MAD-based (orange; Routliers) and
# mean +/- 3 SD (red).
n_ind <- dim(btr_filtered[,8:n_col])[2]
library(Routliers)
library(grid)

for (k in 1:n_ind) {
  # Freedman-Diaconis bin width for this index.
  breaks <- pretty(range(btr_filtered[, k+7]), n = nclass.FD(btr_filtered[, k+7]), min.n = 1)
  binwidth <- breaks[2] - breaks[1]
  # boundary for outliers
  out <- Routliers::outliers_mad(btr_filtered[,k+7], b = 1.483, threshold = 2.24) #see Kline (2015) p. 72 for the formula
  out$UL_CI_MAD  # NOTE(review): value is discarded — this line has no effect
  # NOTE(review): ..density.. is deprecated in newer ggplot2; after_stat(density) is the modern form.
  p <- ggplot(btr_filtered[,8:n_col], aes(x = btr_filtered[,k + 7], y = ..density..)) +
    geom_histogram(binwidth = binwidth) +
    geom_density(color = "blue") +
    geom_vline(xintercept = out$UL_CI_MAD, color="orange", linetype = "longdash") +  
    geom_vline(xintercept = out$LL_CI_MAD, color="orange", linetype = "longdash") +  
    geom_vline(xintercept = mean(btr_filtered[,k+7]) + 3* sd(btr_filtered[,k+7]), color = "red", linetype = 'dashed') +
    geom_vline(xintercept = mean(btr_filtered[,k+7]) - 3* sd(btr_filtered[,k+7]), color = "red", linetype = 'dashed') +
    labs(title = colnames(btr_filtered)[7 + k], x = colnames(btr_filtered)[7 + k])
  print(p)
  cat("\r\n\r\n")
}

2.1.2 Data structure (showing first 6 rows)

head(btr_filtered[8:n_col])

2.1.3 Factorability of the dataset

  • KMO looks good (0.820)
# Kaiser-Meyer-Olkin measure of sampling adequacy over the index columns;
# an overall MSA around .80 or higher indicates good factorability.
KMO_btr <- KMO(btr_filtered[8:n_col])
KMO_btr$MSA
## [1] 0.816818
  • The index “Modal predictive” might behave badly, but no obvious issue was identified.
paged_table(as.data.frame(KMO_btr$MSAi))
  • The Bartlett test indicated that the correlation matrix is appropriate for FA.
# Bartlett's test of sphericity: tests whether the correlation matrix differs
# from an identity matrix, i.e. whether the data are factorable.
# NOTE: the original code called stats::bartlett.test(), which is Bartlett's
# test of *homogeneity of variances* across columns — a different test that
# does not assess factorability (its df of k-1 = 85 in the old output
# confirms this). psych::cortest.bartlett() is the appropriate test here.
psych::cortest.bartlett(cor(btr_filtered[8:n_col]), n = nrow(btr_filtered))
## (stale output below was produced by the earlier stats::bartlett.test call)
## 
##  Bartlett test of homogeneity of variances
## 
## data:  btr_filtered[8:n_col]
## Bartlett's K-squared = 1470539, df = 85, p-value < 2.2e-16

2.1.3.1 The number of unique cells in the variance covariance matrix

This is related to df in the SEM approach, just to show the size of the matrix

# Number of unique entries in the variance-covariance matrix, p(p+1)/2
# (lower triangle including the diagonal).
n_indicator <- ncol(btr_filtered[8:n_col])
(n_indicator * (n_indicator + 1)) / 2
## [1] 3741

2.1.3.2 Multivariate normality assumption (to examine which of PA or ML should be used)

  • The data set is very heavily skewed. Therefore we will extract factors using Principal Axis factoring (PA), not maximum likelihood (ML).
# Mardia's multivariate skewness/kurtosis test; strong non-normality here
# motivates principal-axis rather than maximum-likelihood extraction.
normal <- psych::mardia(btr_filtered[8:n_col],na.rm = TRUE,plot=TRUE)

# Mahalanobis-distance plot flagging the 10 most extreme texts.
outlier <- psych::outlier(btr_filtered[8:n_col], plot = TRUE, bad = 10, na.rm = TRUE) #needs to check the cut-off value for this

2.2 Parallel analysis and scree plot

  • Parallel analysis indicated 21 factors could be extracted. However, when the sample size is huge, parallel analysis is known to suggest a very large number of factors, leading to over-factoring.
  • Therefore, we then visually inspected the scree plot, which suggested that the eigenvalues continue to drop until the six-factor solution is reached. This means that four- and five-factor solutions may be under-factoring (which should be avoided; see Brown, 2015). The six- and seven-factor solutions should not differ much from each other.
  • Overall, we should not stop at 4 or 5, but we can stop at 6 or 7 depending on the interpretability.
# Horn's parallel analysis with principal-axis extraction: 100 iterations,
# 95th-percentile criterion; sim = FALSE resamples the observed data rather
# than simulating from a normal distribution.
screeplot <- fa.parallel(btr_filtered[8:n_col], fa = "fa", fm = "pa", n.iter=100, error.bars = T, sim = FALSE,quant=.95) 

## Parallel analysis suggests that the number of factors =  24  and the number of components =  NA
#factor method to use. (minres, ml, uls, wls, gls, pa)

2.3 Six-factor solution (Scree test preferred this one)

  • explained variance 31%
  • The root mean square of the residuals (RMSR) is 0.04
  • The df corrected root mean square of the residuals is 0.05
  • Tucker Lewis Index of factoring reliability = 0.475
  • RMSEA index = 0.082 and the 95 % confidence intervals are 0.081 0.083
# Six-factor EFA: principal-axis extraction (fm = "pa") with oblique Promax
# rotation; missing values imputed with the column median.
PA6 <- fa(btr_filtered[8:n_col],nfactors= 6, n.iter=1, rotate="Promax",
          residuals=TRUE, SMC=TRUE, missing=TRUE,impute="median",
          min.err = 0.001,  max.iter = 50, symmetric=TRUE, warnings=TRUE, fm="pa",
          alpha=.05, p=.05, oblique.scores=TRUE)

# Sort indices by the size of their loadings for readability of the output.
PA6 <- fa.sort(PA6, polar=FALSE)


# Loadings plus communality, uniqueness and complexity, assembled for export.
PA6loading <- as.data.frame(cbind(PA6$loadings, PA6$communality, PA6$uniquenesses,PA6$complexity))
#write.csv(PA6loading, "MD_result/PA6Loading_forETSreport.csv")

print(PA6, digit = 3)
## Factor Analysis using method =  pa
## Call: fa(r = btr_filtered[8:n_col], nfactors = 6, n.iter = 1, rotate = "Promax", 
##     residuals = TRUE, SMC = TRUE, missing = TRUE, impute = "median", 
##     min.err = 0.001, max.iter = 50, symmetric = TRUE, warnings = TRUE, 
##     fm = "pa", alpha = 0.05, p = 0.05, oblique.scores = TRUE)
## Standardized loadings (pattern matrix) based upon correlation matrix
##                                       PA1    PA3    PA4    PA2    PA6    PA5
## nonfinite_prop                     -0.813  0.221 -0.070  0.072  0.029 -0.185
## cc_clause                           0.774 -0.016 -0.081 -0.112 -0.005 -0.031
## contraction                         0.754  0.049 -0.210 -0.228 -0.065 -0.050
## emphatics                           0.750  0.003 -0.097 -0.111 -0.023 -0.108
## be_mv                               0.732 -0.055  0.029 -0.057 -0.024  0.022
## pp_demonstrative                    0.634  0.019  0.108 -0.198  0.000 -0.125
## nn_all                             -0.619 -0.341 -0.105  0.099 -0.087 -0.042
## pp1                                 0.613  0.151 -0.028 -0.074 -0.119 -0.035
## pp3_it                              0.585 -0.006 -0.077 -0.186 -0.011  0.114
## mean_verbal_deps                   -0.568  0.264 -0.093 -0.056  0.017 -0.156
## factive_adverb                      0.557  0.072 -0.104  0.067  0.048  0.016
## mlc                                -0.484 -0.177 -0.180 -0.032 -0.035 -0.199
## nn_abstract                        -0.442  0.077  0.026  0.307 -0.049 -0.190
## mltu                               -0.433  0.031 -0.073  0.048  0.152 -0.244
## mattr                              -0.411  0.133  0.000  0.284  0.046  0.249
## that_relative_clause                0.348  0.115 -0.120  0.234  0.339 -0.201
## det_nominal                         0.341  0.009  0.205 -0.238  0.175 -0.020
## adverbial_subordinator_causitive    0.308 -0.024  0.073 -0.069  0.043  0.123
## pp_indefinite                       0.292  0.098 -0.107 -0.135  0.046  0.129
## complementizer_that0                0.287  0.114  0.004 -0.009 -0.131  0.166
## jj_predicative                      0.278  0.152  0.113  0.087 -0.075  0.053
## past_participial_clause            -0.272 -0.105  0.082  0.055  0.079 -0.064
## amplifiers_adverb                   0.272 -0.017 -0.066  0.087  0.020  0.081
## discourse_particle                  0.251  0.026  0.228 -0.189 -0.006 -0.050
## agentless_passive                  -0.249 -0.079  0.248  0.038  0.086  0.047
## pv_do                               0.200  0.150  0.017 -0.037 -0.023  0.027
## conjuncts_adverb                   -0.144 -0.062  0.136 -0.118  0.072 -0.041
## downtoners_adverb                   0.124 -0.056  0.035 -0.027  0.070 -0.002
## all_phrasal_verbs                   0.123  0.039 -0.031 -0.104 -0.008  0.111
## to_clause                          -0.174  0.803  0.021  0.053  0.075  0.011
## verb                                0.183  0.695  0.173 -0.121  0.056  0.099
## non_past_tense                      0.316  0.665  0.036 -0.162 -0.022 -0.216
## to_clause_verb                      0.205  0.515 -0.015 -0.065  0.048 -0.050
## pp2                                -0.102  0.514 -0.208 -0.171 -0.039 -0.116
## mental_verb                         0.148  0.505  0.120  0.076 -0.105  0.068
## to_clause_verb_desire               0.095  0.447 -0.009 -0.023 -0.013 -0.078
## to_clause_noun                     -0.388  0.439  0.049  0.095  0.072  0.067
## dc_c                                0.102  0.413  0.240  0.076  0.400 -0.106
## activity_verb                      -0.080  0.391 -0.105 -0.360  0.101  0.054
## poss_nominal                       -0.089  0.372 -0.250  0.015 -0.031  0.290
## mean_nominal_deps                   0.017 -0.299 -0.165  0.088  0.135 -0.190
## modal_possibility                   0.041  0.292  0.166  0.028  0.063 -0.112
## to_clause_verb_to_causative        -0.002  0.279 -0.083  0.012  0.081  0.113
## aspectual_verb                     -0.159  0.267 -0.016 -0.042  0.032  0.002
## wh_clause                           0.098  0.259  0.106  0.088 -0.065 -0.056
## communication_verb                  0.018  0.246  0.069 -0.022 -0.009  0.170
## to_clause_adjective                -0.111  0.235  0.028  0.082  0.012  0.070
## split_aux                           0.143  0.192  0.035  0.011  0.042 -0.036
## causation_verb                     -0.053  0.192  0.089  0.087 -0.039 -0.127
## modal_predictive                   -0.187  0.189  0.035 -0.013 -0.030 -0.084
## by_passive                         -0.054 -0.175  0.078  0.065  0.102  0.070
## adverbial_subordinator_other       -0.018  0.168  0.158  0.010  0.043  0.080
## that_verb_clause                   -0.033  0.069  0.886 -0.039 -0.120 -0.017
## that_complement_clause              0.087  0.083  0.839  0.002 -0.082  0.005
## that_verb_clause_factive           -0.141  0.069  0.787 -0.117 -0.103 -0.078
## ccomp_c                             0.160  0.276  0.469  0.081 -0.178 -0.041
## existence_verb                     -0.043  0.040  0.234  0.104  0.027 -0.075
## that_noun_clause                    0.084  0.040  0.213  0.090  0.078  0.035
## wh_question                        -0.018  0.012 -0.168 -0.052 -0.115 -0.011
## wrd_length                         -0.551 -0.019 -0.003  0.588 -0.026  0.024
## cc_phrase                          -0.217  0.100 -0.108  0.561 -0.073 -0.056
## cc_nominal                          0.033  0.052 -0.148  0.526 -0.065 -0.115
## nominalization                     -0.375 -0.008  0.080  0.498 -0.097 -0.136
## jj_attributive                     -0.167 -0.239  0.035  0.441  0.049 -0.076
## amod_nominal                        0.104 -0.142  0.021  0.373  0.082 -0.008
## prep_phrase                        -0.213 -0.218  0.169  0.354  0.146  0.070
## adverbial_subordinator_conditional -0.006  0.174  0.044 -0.330 -0.004 -0.112
## prep_nominal                        0.055 -0.291  0.116  0.301  0.101  0.017
## time_adverbials                     0.225  0.014  0.142 -0.277  0.032 -0.090
## place_adverbials                    0.150  0.022  0.005 -0.271  0.000 -0.119
## nn_group                           -0.030 -0.035 -0.103  0.242  0.004  0.176
## nn_cognitive                       -0.187  0.069 -0.055  0.242 -0.031 -0.061
## nn_concrete                        -0.195 -0.053 -0.046 -0.202  0.029 -0.090
## nn_place                           -0.038 -0.136 -0.058  0.191 -0.058  0.065
## nn_quantity                        -0.046 -0.109 -0.004 -0.150 -0.014 -0.121
## occurrence_verb                     0.085 -0.060  0.048  0.127  0.015  0.040
## wh_relative_clause                 -0.151 -0.067  0.003 -0.208  0.824  0.435
## relcl_c                             0.038  0.134 -0.244  0.142  0.807 -0.111
## wh_relative_subj_clause            -0.123 -0.059 -0.008 -0.181  0.708  0.441
## relcl_nominal                       0.460  0.307 -0.153  0.032  0.596 -0.023
## wh_relative_prep_clause            -0.063 -0.032  0.030 -0.007  0.301  0.032
## pp3                                 0.140  0.038 -0.050  0.028  0.077  0.611
## past_tense                          0.184 -0.191  0.036 -0.093  0.061  0.577
## nn_animate                         -0.094  0.117 -0.145  0.239  0.022  0.439
## perfect_aspect                      0.068  0.062  0.119  0.042  0.043  0.246
## nn_technical                       -0.184 -0.161  0.063 -0.128 -0.002 -0.189
##                                        h2    u2  com
## nonfinite_prop                     0.7254 0.275 1.29
## cc_clause                          0.5873 0.413 1.07
## contraction                        0.6407 0.359 1.38
## emphatics                          0.5428 0.457 1.12
## be_mv                              0.5486 0.451 1.03
## pp_demonstrative                   0.5565 0.443 1.35
## nn_all                             0.8163 0.184 1.74
## pp1                                0.4810 0.519 1.25
## pp3_it                             0.4259 0.574 1.32
## mean_verbal_deps                   0.3700 0.630 1.68
## factive_adverb                     0.3074 0.693 1.15
## mlc                                0.5394 0.461 1.97
## nn_abstract                        0.3661 0.634 2.31
## mltu                               0.3314 0.669 1.98
## mattr                              0.2991 0.701 2.80
## that_relative_clause               0.3277 0.672 3.91
## det_nominal                        0.3560 0.644 3.11
## adverbial_subordinator_causitive   0.1633 0.837 1.62
## pp_indefinite                      0.1686 0.831 2.53
## complementizer_that0               0.1889 0.811 2.45
## jj_predicative                     0.1720 0.828 2.47
## past_participial_clause            0.1258 0.874 1.93
## amplifiers_adverb                  0.0764 0.924 1.56
## discourse_particle                 0.2241 0.776 2.97
## agentless_passive                  0.1133 0.887 2.56
## pv_do                              0.0998 0.900 2.02
## conjuncts_adverb                   0.0460 0.954 4.03
## downtoners_adverb                  0.0273 0.973 2.38
## all_phrasal_verbs                  0.0529 0.947 3.33
## to_clause                          0.5552 0.445 1.12
## verb                               0.7950 0.205 1.40
## non_past_tense                     0.7967 0.203 1.82
## to_clause_verb                     0.3993 0.601 1.39
## pp2                                0.3540 0.646 1.80
## mental_verb                        0.3849 0.615 1.49
## to_clause_verb_desire              0.2409 0.759 1.16
## to_clause_noun                     0.2124 0.788 2.22
## dc_c                               0.5075 0.492 2.97
## activity_verb                      0.2961 0.704 2.42
## poss_nominal                       0.2741 0.726 2.87
## mean_nominal_deps                  0.2192 0.781 3.07
## modal_possibility                  0.1417 0.858 2.11
## to_clause_verb_to_causative        0.0957 0.904 1.72
## aspectual_verb                     0.0665 0.933 1.73
## wh_clause                          0.1070 0.893 2.22
## communication_verb                 0.1212 0.879 2.00
## to_clause_adjective                0.0569 0.943 1.96
## split_aux                          0.0832 0.917 2.13
## causation_verb                     0.0480 0.952 3.04
## modal_predictive                   0.0525 0.947 2.52
## by_passive                         0.0720 0.928 3.07
## adverbial_subordinator_other       0.0707 0.929 2.62
## that_verb_clause                   0.7373 0.263 1.06
## that_complement_clause             0.7638 0.236 1.06
## that_verb_clause_factive           0.5464 0.454 1.18
## ccomp_c                            0.3955 0.604 2.34
## existence_verb                     0.0660 0.934 1.80
## that_noun_clause                   0.1001 0.900 2.18
## wh_question                        0.0616 0.938 2.04
## wrd_length                         0.7977 0.202 2.00
## cc_phrase                          0.4138 0.586 1.51
## cc_nominal                         0.2809 0.719 1.32
## nominalization                     0.4664 0.534 2.19
## jj_attributive                     0.4063 0.594 1.99
## amod_nominal                       0.1940 0.806 1.58
## prep_phrase                        0.3670 0.633 3.50
## adverbial_subordinator_conditional 0.1789 0.821 1.82
## prep_nominal                       0.2453 0.755 2.61
## time_adverbials                    0.2086 0.791 2.75
## place_adverbials                   0.1303 0.870 2.01
## nn_group                           0.1128 0.887 2.32
## nn_cognitive                       0.1179 0.882 2.39
## nn_concrete                        0.0932 0.907 2.69
## nn_place                           0.0828 0.917 2.64
## nn_quantity                        0.0544 0.946 3.04
## occurrence_verb                    0.0317 0.968 2.90
## wh_relative_clause                 0.7104 0.290 1.77
## relcl_c                            0.6962 0.304 1.36
## wh_relative_subj_clause            0.5628 0.437 1.92
## relcl_nominal                      0.7155 0.285 2.61
## wh_relative_prep_clause            0.0953 0.905 1.16
## pp3                                0.4315 0.569 1.17
## past_tense                         0.4043 0.596 1.53
## nn_animate                         0.2879 0.712 2.10
## perfect_aspect                     0.1182 0.882 1.93
## nn_technical                       0.1323 0.868 3.96
## 
##                         PA1   PA3   PA4   PA2   PA6   PA5
## SS loadings           9.214 5.236 3.365 3.760 2.747 2.316
## Proportion Var        0.107 0.061 0.039 0.044 0.032 0.027
## Cumulative Var        0.107 0.168 0.207 0.251 0.283 0.310
## Proportion Explained  0.346 0.197 0.126 0.141 0.103 0.087
## Cumulative Proportion 0.346 0.542 0.669 0.810 0.913 1.000
## 
##  With factor correlations of 
##        PA1    PA3    PA4    PA2    PA6    PA5
## PA1  1.000  0.382  0.398 -0.217  0.134  0.210
## PA3  0.382  1.000  0.106 -0.214 -0.081  0.136
## PA4  0.398  0.106  1.000 -0.042  0.289  0.128
## PA2 -0.217 -0.214 -0.042  1.000  0.274  0.071
## PA6  0.134 -0.081  0.289  0.274  1.000 -0.070
## PA5  0.210  0.136  0.128  0.071 -0.070  1.000
## 
## Mean item complexity =  2.1
## Test of the hypothesis that 6 factors are sufficient.
## 
## The degrees of freedom for the null model are  3655  and the objective function was  47.619 with Chi Square of  215834.4
## The degrees of freedom for the model are 3154  and the objective function was  21.914 
## 
## The root mean square of the residuals (RMSR) is  0.043 
## The df corrected root mean square of the residuals is  0.047 
## 
## The harmonic number of observations is  4563 with the empirical chi square  62578.41  with prob <  0 
## The total number of observations was  4563  with Likelihood Chi Square =  99235.98  with prob <  0 
## 
## Tucker Lewis Index of factoring reliability =  0.4748
## RMSEA index =  0.0817  and the 95 % confidence intervals are  0.0812 0.0822
## BIC =  72661.21
## Fit based upon off diagonal values = 0.938
## Measures of factor score adequacy             
##                                                     PA1   PA3   PA4   PA2   PA6
## Correlation of (regression) scores with factors   0.977 0.972 0.956 0.929 0.975
## Multiple R square of scores with factors          0.955 0.944 0.914 0.862 0.951
## Minimum correlation of possible factor scores     0.910 0.888 0.828 0.725 0.902
##                                                     PA5
## Correlation of (regression) scores with factors   0.914
## Multiple R square of scores with factors          0.836
## Minimum correlation of possible factor scores     0.671

2.3.1 Extracting primary loading linguistic indices

# Keep only indices with |primary loading| >= .30 and communality >= .15
# (cut-offs following Biber, 1988/2006).
PA6_refined <- MD.reduce(PA6, .3, .15)
paged_table(PA6_refined)

2.3.2 Number of remaining indices

# Count the indices that survived both cut-offs (rows with no NA values).
dim(na.omit(PA6_refined))
## [1] 49  3

2.3.3 Calculating the Dimensional score

# Per-text dimension scores on the six factors (metadata + one column per factor).
PA6_scores <- MD.scores(PA6, PA6_refined, btr_filtered)
paged_table(PA6_scores)
#write.csv(PA6_scores, "MD_result/Dimensional_score_PA6_5.0_ETSreport_modified.csv")

2.3.4 Dimension score: text-type * mode * learning_environment

MD.visualize(PA6, PA6_refined, btr_filtered, x = learning_environment, color = learning_environment, grid = ~mode,legend = T, xorder = "nlengths")
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8132950   0.7253723
## cc_clause                                  PA1  0.7742338   0.5873482
## contraction                                PA1  0.7542997   0.6407286
## emphatics                                  PA1  0.7495712   0.5427764
## be_mv                                      PA1  0.7322963   0.5486265
## pp_demonstrative                           PA1  0.6335961   0.5565309
## nn_all                                     PA1 -0.6188822   0.8163238
## pp1                                        PA1  0.6131749   0.4810347
## pp3_it                                     PA1  0.5852639   0.4258781
## mean_verbal_deps                           PA1 -0.5684253   0.3699637
## factive_adverb                             PA1  0.5566128   0.3073590
## mlc                                        PA1 -0.4843049   0.5393716
## nn_abstract                                PA1 -0.4419376   0.3660817
## mltu                                       PA1 -0.4329341   0.3313769
## mattr                                      PA1 -0.4105833   0.2990983
## that_relative_clause                       PA1  0.3478179   0.3277435
## det_nominal                                PA1  0.3413118   0.3559768
## adverbial_subordinator_causitive           PA1  0.3082464   0.1632622

## 
## 
##                                    factor_loaded    loading communality
## wrd_length                                   PA2  0.5880763   0.7977080
## cc_phrase                                    PA2  0.5613025   0.4137876
## cc_nominal                                   PA2  0.5260028   0.2808598
## nominalization                               PA2  0.4980983   0.4664133
## jj_attributive                               PA2  0.4408125   0.4063362
## amod_nominal                                 PA2  0.3734195   0.1939523
## prep_phrase                                  PA2  0.3538703   0.3669912
## adverbial_subordinator_conditional           PA2 -0.3301145   0.1789314
## prep_nominal                                 PA2  0.3007542   0.2453117

## 
## 
##                       factor_loaded   loading communality
## to_clause                       PA3 0.8032894   0.5551529
## verb                            PA3 0.6946974   0.7949891
## non_past_tense                  PA3 0.6651300   0.7967250
## to_clause_verb                  PA3 0.5146971   0.3993298
## pp2                             PA3 0.5144921   0.3540182
## mental_verb                     PA3 0.5047000   0.3849005
## to_clause_verb_desire           PA3 0.4472352   0.2408542
## to_clause_noun                  PA3 0.4389121   0.2124316
## dc_c                            PA3 0.4125804   0.5075388
## activity_verb                   PA3 0.3906763   0.2960751
## poss_nominal                    PA3 0.3721704   0.2740686

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.8855422   0.7373146
## that_complement_clause             PA4 0.8385487   0.7637723
## that_verb_clause_factive           PA4 0.7869121   0.5464035
## ccomp_c                            PA4 0.4685438   0.3955203

## 
## 
##            factor_loaded   loading communality
## pp3                  PA5 0.6110859   0.4314571
## past_tense           PA5 0.5767110   0.4043281
## nn_animate           PA5 0.4394147   0.2878991

## 
## 
##                         factor_loaded   loading communality
## wh_relative_clause                PA6 0.8238232   0.7104070
## relcl_c                           PA6 0.8071224   0.6962011
## wh_relative_subj_clause           PA6 0.7076527   0.5628490
## relcl_nominal                     PA6 0.5960646   0.7154814

MD.visualize(PA6, PA6_refined, btr_filtered, x = text_type, color = mode, grid = ~mode+learning_environment, legend = F, xorder = "nlengths")
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8132950   0.7253723
## cc_clause                                  PA1  0.7742338   0.5873482
## contraction                                PA1  0.7542997   0.6407286
## emphatics                                  PA1  0.7495712   0.5427764
## be_mv                                      PA1  0.7322963   0.5486265
## pp_demonstrative                           PA1  0.6335961   0.5565309
## nn_all                                     PA1 -0.6188822   0.8163238
## pp1                                        PA1  0.6131749   0.4810347
## pp3_it                                     PA1  0.5852639   0.4258781
## mean_verbal_deps                           PA1 -0.5684253   0.3699637
## factive_adverb                             PA1  0.5566128   0.3073590
## mlc                                        PA1 -0.4843049   0.5393716
## nn_abstract                                PA1 -0.4419376   0.3660817
## mltu                                       PA1 -0.4329341   0.3313769
## mattr                                      PA1 -0.4105833   0.2990983
## that_relative_clause                       PA1  0.3478179   0.3277435
## det_nominal                                PA1  0.3413118   0.3559768
## adverbial_subordinator_causitive           PA1  0.3082464   0.1632622

## 
## 
##                                    factor_loaded    loading communality
## wrd_length                                   PA2  0.5880763   0.7977080
## cc_phrase                                    PA2  0.5613025   0.4137876
## cc_nominal                                   PA2  0.5260028   0.2808598
## nominalization                               PA2  0.4980983   0.4664133
## jj_attributive                               PA2  0.4408125   0.4063362
## amod_nominal                                 PA2  0.3734195   0.1939523
## prep_phrase                                  PA2  0.3538703   0.3669912
## adverbial_subordinator_conditional           PA2 -0.3301145   0.1789314
## prep_nominal                                 PA2  0.3007542   0.2453117

## 
## 
##                       factor_loaded   loading communality
## to_clause                       PA3 0.8032894   0.5551529
## verb                            PA3 0.6946974   0.7949891
## non_past_tense                  PA3 0.6651300   0.7967250
## to_clause_verb                  PA3 0.5146971   0.3993298
## pp2                             PA3 0.5144921   0.3540182
## mental_verb                     PA3 0.5047000   0.3849005
## to_clause_verb_desire           PA3 0.4472352   0.2408542
## to_clause_noun                  PA3 0.4389121   0.2124316
## dc_c                            PA3 0.4125804   0.5075388
## activity_verb                   PA3 0.3906763   0.2960751
## poss_nominal                    PA3 0.3721704   0.2740686

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.8855422   0.7373146
## that_complement_clause             PA4 0.8385487   0.7637723
## that_verb_clause_factive           PA4 0.7869121   0.5464035
## ccomp_c                            PA4 0.4685438   0.3955203

## 
## 
##            factor_loaded   loading communality
## pp3                  PA5 0.6110859   0.4314571
## past_tense           PA5 0.5767110   0.4043281
## nn_animate           PA5 0.4394147   0.2878991

## 
## 
##                         factor_loaded   loading communality
## wh_relative_clause                PA6 0.8238232   0.7104070
## relcl_c                           PA6 0.8071224   0.6962011
## wh_relative_subj_clause           PA6 0.7076527   0.5628490
## relcl_nominal                     PA6 0.5960646   0.7154814

2.3.5 Dimension score: Discipline by mode

MD.visualize(PA6, PA6_refined, btr_filtered, x = discipline, color = discipline, grid = mode~learning_environment, legend = F, xorder = "nlengths")
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8132950   0.7253723
## cc_clause                                  PA1  0.7742338   0.5873482
## contraction                                PA1  0.7542997   0.6407286
## emphatics                                  PA1  0.7495712   0.5427764
## be_mv                                      PA1  0.7322963   0.5486265
## pp_demonstrative                           PA1  0.6335961   0.5565309
## nn_all                                     PA1 -0.6188822   0.8163238
## pp1                                        PA1  0.6131749   0.4810347
## pp3_it                                     PA1  0.5852639   0.4258781
## mean_verbal_deps                           PA1 -0.5684253   0.3699637
## factive_adverb                             PA1  0.5566128   0.3073590
## mlc                                        PA1 -0.4843049   0.5393716
## nn_abstract                                PA1 -0.4419376   0.3660817
## mltu                                       PA1 -0.4329341   0.3313769
## mattr                                      PA1 -0.4105833   0.2990983
## that_relative_clause                       PA1  0.3478179   0.3277435
## det_nominal                                PA1  0.3413118   0.3559768
## adverbial_subordinator_causitive           PA1  0.3082464   0.1632622

## 
## 
##                                    factor_loaded    loading communality
## wrd_length                                   PA2  0.5880763   0.7977080
## cc_phrase                                    PA2  0.5613025   0.4137876
## cc_nominal                                   PA2  0.5260028   0.2808598
## nominalization                               PA2  0.4980983   0.4664133
## jj_attributive                               PA2  0.4408125   0.4063362
## amod_nominal                                 PA2  0.3734195   0.1939523
## prep_phrase                                  PA2  0.3538703   0.3669912
## adverbial_subordinator_conditional           PA2 -0.3301145   0.1789314
## prep_nominal                                 PA2  0.3007542   0.2453117

## 
## 
##                       factor_loaded   loading communality
## to_clause                       PA3 0.8032894   0.5551529
## verb                            PA3 0.6946974   0.7949891
## non_past_tense                  PA3 0.6651300   0.7967250
## to_clause_verb                  PA3 0.5146971   0.3993298
## pp2                             PA3 0.5144921   0.3540182
## mental_verb                     PA3 0.5047000   0.3849005
## to_clause_verb_desire           PA3 0.4472352   0.2408542
## to_clause_noun                  PA3 0.4389121   0.2124316
## dc_c                            PA3 0.4125804   0.5075388
## activity_verb                   PA3 0.3906763   0.2960751
## poss_nominal                    PA3 0.3721704   0.2740686

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.8855422   0.7373146
## that_complement_clause             PA4 0.8385487   0.7637723
## that_verb_clause_factive           PA4 0.7869121   0.5464035
## ccomp_c                            PA4 0.4685438   0.3955203

## 
## 
##            factor_loaded   loading communality
## pp3                  PA5 0.6110859   0.4314571
## past_tense           PA5 0.5767110   0.4043281
## nn_animate           PA5 0.4394147   0.2878991

## 
## 
##                         factor_loaded   loading communality
## wh_relative_clause                PA6 0.8238232   0.7104070
## relcl_c                           PA6 0.8071224   0.6962011
## wh_relative_subj_clause           PA6 0.7076527   0.5628490
## relcl_nominal                     PA6 0.5960646   0.7154814

2.4 Seven-factor solution

The seven-factor solution fits somewhat better, but the changes are slight. Since no solution is clearly favored on statistical grounds, we may prioritize interpretability. Fit summary: explained 33% of the variance; root mean square of the residuals (RMSR) = 0.04; df-corrected RMSR = 0.04; Tucker-Lewis Index of factoring reliability = 0.503; RMSEA = 0.08 with a 95% confidence interval of [0.079, 0.08].

# Seven-factor EFA: principal-axis extraction (fm = "pa") with Promax
# rotation, Bartlett factor scores, and median imputation for missing
# values. Argument order is kept identical to the six-factor fit so the
# echoed Call: line stays comparable.
PA7 <- fa(
  btr_filtered[8:n_col],
  nfactors = 7, n.iter = 1,
  rotate = "Promax", scores = "Bartlett",
  residuals = TRUE, SMC = TRUE,
  missing = TRUE, impute = "median",
  min.err = 0.001, max.iter = 50,
  symmetric = TRUE, warnings = TRUE, fm = "pa",
  alpha = .05, p = .05, oblique.scores = TRUE
)

# Sort the loading matrix by factor and loading size before printing.
PA7 <- fa.sort(PA7, polar = FALSE)
print(PA7)
## Factor Analysis using method =  pa
## Call: fa(r = btr_filtered[8:n_col], nfactors = 7, n.iter = 1, rotate = "Promax", 
##     scores = "Bartlett", residuals = TRUE, SMC = TRUE, missing = TRUE, 
##     impute = "median", min.err = 0.001, max.iter = 50, symmetric = TRUE, 
##     warnings = TRUE, fm = "pa", alpha = 0.05, p = 0.05, oblique.scores = TRUE)
## Standardized loadings (pattern matrix) based upon correlation matrix
##                                      PA1   PA3   PA4   PA2   PA7   PA5   PA6
## nonfinite_prop                     -0.82  0.15 -0.05  0.04 -0.18 -0.16  0.00
## contraction                         0.82  0.02 -0.16 -0.04 -0.12 -0.02  0.04
## cc_clause                           0.80 -0.09 -0.01  0.04 -0.08  0.02  0.10
## emphatics                           0.78 -0.03 -0.05  0.01 -0.04 -0.08  0.12
## be_mv                               0.75 -0.01  0.02 -0.01  0.13  0.01  0.07
## wrd_length                         -0.73  0.00 -0.03 -0.02  0.28  0.06  0.31
## pp_demonstrative                    0.69  0.03  0.11  0.02 -0.01 -0.14  0.00
## nn_all                             -0.65 -0.41 -0.04 -0.08 -0.12  0.00 -0.02
## pp3_it                              0.64  0.04 -0.09 -0.02  0.02  0.09 -0.06
## pp1                                 0.62  0.08  0.03 -0.08 -0.10  0.03  0.12
## mean_verbal_deps                   -0.56  0.09  0.01  0.05 -0.38 -0.08 -0.02
## nn_abstract                        -0.53  0.03  0.04 -0.02  0.04 -0.14  0.22
## nominalization                     -0.53 -0.04  0.09 -0.07  0.17 -0.07  0.34
## factive_adverb                      0.52 -0.01 -0.03  0.09 -0.03  0.08  0.19
## mattr                              -0.51  0.12 -0.01  0.03  0.10  0.27  0.06
## mlc                                -0.50 -0.43  0.01  0.03 -0.47 -0.09  0.03
## det_nominal                         0.42  0.07  0.16  0.16  0.04 -0.09 -0.16
## to_clause_noun                     -0.42  0.41  0.03  0.06 -0.04  0.08  0.00
## prep_phrase                        -0.33 -0.20  0.16  0.14  0.24  0.07  0.13
## adverbial_subordinator_causitive    0.33  0.02  0.05  0.03  0.08  0.09 -0.06
## pp_indefinite                       0.32  0.08 -0.09  0.04 -0.08  0.14 -0.06
## discourse_particle                  0.32  0.08  0.18 -0.02  0.04 -0.10 -0.12
## time_adverbials                     0.31  0.01  0.14  0.03 -0.11 -0.12 -0.15
## past_participial_clause            -0.27 -0.01  0.01  0.05  0.16 -0.13 -0.05
## complementizer_that0                0.27  0.04  0.06 -0.12 -0.10  0.23  0.04
## nn_cognitive                       -0.26  0.03 -0.03 -0.01  0.03 -0.01  0.19
## jj_predicative                      0.25  0.18  0.08 -0.07  0.12  0.06  0.10
## amplifiers_adverb                   0.24 -0.01 -0.06  0.02  0.09  0.09  0.09
## place_adverbials                    0.24  0.02  0.01  0.00 -0.14 -0.14 -0.13
## pv_do                               0.21  0.14  0.02 -0.02 -0.02  0.04  0.02
## all_phrasal_verbs                   0.15  0.01 -0.01 -0.01 -0.09  0.12 -0.07
## downtoners_adverb                   0.14  0.01 -0.01  0.06  0.11 -0.05 -0.03
## verb                                0.23  0.84  0.02  0.02  0.14  0.02 -0.10
## to_clause                          -0.18  0.76 -0.02  0.07 -0.07  0.03  0.06
## non_past_tense                      0.39  0.70 -0.04 -0.01 -0.03 -0.24  0.05
## to_clause_verb                      0.23  0.51 -0.05  0.05 -0.04 -0.05  0.04
## activity_verb                       0.04  0.43 -0.16  0.06 -0.17 -0.01 -0.27
## modal_possibility                   0.05  0.41  0.05  0.04  0.20 -0.18  0.03
## to_clause_verb_desire               0.11  0.40 -0.01  0.00 -0.09 -0.05  0.07
## pp2                                -0.05  0.39 -0.15 -0.02 -0.35 -0.05  0.00
## mental_verb                         0.11  0.39  0.17 -0.08 -0.13  0.16  0.13
## mean_nominal_deps                  -0.01 -0.36 -0.10  0.17 -0.02 -0.17  0.14
## prep_nominal                       -0.05 -0.33  0.16  0.12  0.16  0.05  0.19
## to_clause_verb_to_causative        -0.01  0.27 -0.09  0.07 -0.03  0.12  0.00
## wh_clause                           0.07  0.25  0.09 -0.05  0.03 -0.03  0.11
## to_clause_adjective                -0.13  0.25  0.00  0.00  0.04  0.07  0.03
## modal_predictive                   -0.17  0.24 -0.02 -0.04  0.04 -0.11 -0.02
## causation_verb                     -0.07  0.23  0.04 -0.04  0.10 -0.14  0.09
## split_aux                           0.15  0.23  0.00  0.04  0.07 -0.05  0.04
## adverbial_subordinator_other       -0.02  0.22  0.10  0.02  0.10  0.04 -0.05
## aspectual_verb                     -0.15  0.22  0.00  0.04 -0.12  0.02 -0.03
## that_verb_clause                   -0.05 -0.02  0.92 -0.10 -0.05  0.01 -0.10
## that_complement_clause              0.05 -0.04  0.92 -0.06 -0.07  0.06 -0.04
## that_verb_clause_factive           -0.11  0.03  0.78 -0.10 -0.05 -0.09 -0.16
## ccomp_c                             0.11  0.14  0.54 -0.13 -0.11  0.05  0.12
## that_noun_clause                    0.04 -0.04  0.27  0.10 -0.02  0.08  0.06
## wh_question                         0.00  0.04 -0.19 -0.13 -0.02 -0.01 -0.01
## relcl_c                            -0.01  0.02 -0.16  0.85 -0.03 -0.11  0.14
## wh_relative_clause                 -0.10 -0.02 -0.03  0.75  0.06  0.30 -0.42
## wh_relative_subj_clause            -0.08 -0.01 -0.03  0.64  0.05  0.33 -0.38
## relcl_nominal                       0.44  0.22 -0.09  0.63 -0.02 -0.01  0.12
## dc_c                                0.05  0.23  0.35  0.47 -0.17 -0.04  0.13
## that_relative_clause                0.28  0.10 -0.10  0.38  0.15 -0.19  0.29
## wh_relative_prep_clause            -0.06 -0.03  0.03  0.29  0.04  0.00 -0.07
## jj_attributive                     -0.26  0.00 -0.13  0.01  0.61 -0.19  0.25
## amod_nominal                        0.02  0.04 -0.11  0.05  0.52 -0.09  0.25
## mltu                               -0.49 -0.29  0.16  0.26 -0.51 -0.11  0.12
## agentless_passive                  -0.24  0.14  0.08  0.02  0.37 -0.10 -0.15
## by_passive                         -0.06 -0.03 -0.02  0.06  0.27 -0.02 -0.06
## existence_verb                     -0.06  0.15  0.14  0.01  0.24 -0.14  0.03
## adverbial_subordinator_conditional  0.10  0.16  0.04 -0.01 -0.21 -0.13 -0.19
## occurrence_verb                     0.05  0.00  0.01  0.01  0.17  0.02  0.07
## nn_place                           -0.09 -0.09 -0.08 -0.07  0.16  0.06  0.10
## pp3                                 0.09 -0.03  0.02  0.05 -0.07  0.66 -0.13
## past_tense                          0.18 -0.22  0.09  0.03 -0.04  0.59 -0.23
## nn_animate                         -0.21 -0.03 -0.04  0.03 -0.11  0.56  0.08
## poss_nominal                       -0.13  0.19 -0.13 -0.02 -0.33  0.42  0.02
## nn_technical                       -0.12 -0.06 -0.01 -0.02  0.08 -0.27 -0.12
## nn_group                           -0.12 -0.11 -0.04  0.02  0.01  0.25  0.14
## perfect_aspect                      0.04  0.07  0.10  0.02  0.07  0.24 -0.05
## communication_verb                  0.01  0.19  0.09 -0.01 -0.09  0.20 -0.04
## nn_quantity                         0.01 -0.10 -0.01 -0.02 -0.07 -0.14 -0.09
## cc_nominal                         -0.14 -0.01 -0.10 -0.01  0.16 -0.02  0.47
## cc_phrase                          -0.40  0.03 -0.07 -0.03  0.14  0.05  0.43
## nn_concrete                        -0.12  0.02 -0.10  0.00 -0.03 -0.16 -0.18
## conjuncts_adverb                   -0.10 -0.02  0.10  0.05  0.02 -0.10 -0.14
##                                       h2   u2 com
## nonfinite_prop                     0.724 0.28 1.3
## contraction                        0.643 0.36 1.1
## cc_clause                          0.606 0.39 1.1
## emphatics                          0.548 0.45 1.1
## be_mv                              0.548 0.45 1.1
## wrd_length                         0.801 0.20 1.7
## pp_demonstrative                   0.556 0.44 1.1
## nn_all                             0.830 0.17 1.8
## pp3_it                             0.426 0.57 1.1
## pp1                                0.491 0.51 1.2
## mean_verbal_deps                   0.411 0.59 1.9
## nn_abstract                        0.365 0.63 1.5
## nominalization                     0.463 0.54 2.1
## factive_adverb                     0.325 0.68 1.4
## mattr                              0.298 0.70 1.8
## mlc                                0.686 0.31 3.0
## det_nominal                        0.358 0.64 2.2
## to_clause_noun                     0.212 0.79 2.2
## prep_phrase                        0.364 0.64 4.2
## adverbial_subordinator_causitive   0.164 0.84 1.4
## pp_indefinite                      0.169 0.83 1.9
## discourse_particle                 0.227 0.77 2.4
## time_adverbials                    0.209 0.79 2.6
## past_participial_clause            0.144 0.86 2.3
## complementizer_that0               0.199 0.80 2.9
## nn_cognitive                       0.119 0.88 1.9
## jj_predicative                     0.174 0.83 3.4
## amplifiers_adverb                  0.076 0.92 2.1
## place_adverbials                   0.130 0.87 3.0
## pv_do                              0.100 0.90 1.9
## all_phrasal_verbs                  0.054 0.95 3.2
## downtoners_adverb                  0.033 0.97 2.6
## verb                               0.891 0.11 1.2
## to_clause                          0.558 0.44 1.2
## non_past_tense                     0.822 0.18 1.8
## to_clause_verb                     0.402 0.60 1.5
## activity_verb                      0.309 0.69 2.5
## modal_possibility                  0.187 0.81 2.0
## to_clause_verb_desire              0.239 0.76 1.3
## pp2                                0.359 0.64 2.4
## mental_verb                        0.396 0.60 2.6
## mean_nominal_deps                  0.234 0.77 2.5
## prep_nominal                       0.255 0.74 3.2
## to_clause_verb_to_causative        0.096 0.90 1.9
## wh_clause                          0.107 0.89 2.1
## to_clause_adjective                0.060 0.94 1.8
## modal_predictive                   0.065 0.93 2.5
## causation_verb                     0.057 0.94 2.8
## split_aux                          0.089 0.91 2.3
## adverbial_subordinator_other       0.079 0.92 2.1
## aspectual_verb                     0.067 0.93 2.6
## that_verb_clause                   0.782 0.22 1.1
## that_complement_clause             0.850 0.15 1.0
## that_verb_clause_factive           0.553 0.45 1.2
## ccomp_c                            0.439 0.56 1.6
## that_noun_clause                   0.118 0.88 1.7
## wh_question                        0.065 0.93 1.9
## relcl_c                            0.719 0.28 1.2
## wh_relative_clause                 0.707 0.29 2.0
## wh_relative_subj_clause            0.560 0.44 2.3
## relcl_nominal                      0.721 0.28 2.2
## dc_c                               0.584 0.42 2.9
## that_relative_clause               0.321 0.68 4.2
## wh_relative_prep_clause            0.094 0.91 1.3
## jj_attributive                     0.543 0.46 2.1
## amod_nominal                       0.275 0.72 1.7
## mltu                               0.574 0.43 3.6
## agentless_passive                  0.217 0.78 2.8
## by_passive                         0.106 0.89 1.4
## existence_verb                     0.090 0.91 3.3
## adverbial_subordinator_conditional 0.178 0.82 4.2
## occurrence_verb                    0.036 0.96 1.5
## nn_place                           0.086 0.91 4.5
## pp3                                0.445 0.56 1.2
## past_tense                         0.413 0.59 1.9
## nn_animate                         0.331 0.67 1.4
## poss_nominal                       0.322 0.68 2.8
## nn_technical                       0.151 0.85 2.1
## nn_group                           0.123 0.88 2.7
## perfect_aspect                     0.117 0.88 2.0
## communication_verb                 0.124 0.88 2.9
## nn_quantity                        0.054 0.95 3.0
## cc_nominal                         0.279 0.72 1.5
## cc_phrase                          0.411 0.59 2.3
## nn_concrete                        0.104 0.90 3.5
## conjuncts_adverb                   0.048 0.95 3.9
## 
##                         PA1  PA3  PA4  PA2  PA7  PA5  PA6
## SS loadings           10.36 5.05 3.29 2.79 2.46 2.42 1.90
## Proportion Var         0.12 0.06 0.04 0.03 0.03 0.03 0.02
## Cumulative Var         0.12 0.18 0.22 0.25 0.28 0.31 0.33
## Proportion Explained   0.37 0.18 0.12 0.10 0.09 0.09 0.07
## Cumulative Proportion  0.37 0.55 0.66 0.76 0.85 0.93 1.00
## 
##  With factor correlations of 
##       PA1   PA3   PA4  PA2   PA7  PA5   PA6
## PA1  1.00  0.41  0.40 0.07 -0.12 0.19 -0.11
## PA3  0.41  1.00  0.26 0.01 -0.29 0.18  0.05
## PA4  0.40  0.26  1.00 0.26  0.17 0.14 -0.01
## PA2  0.07  0.01  0.26 1.00  0.23 0.13  0.21
## PA7 -0.12 -0.29  0.17 0.23  1.00 0.20 -0.12
## PA5  0.19  0.18  0.14 0.13  0.20 1.00  0.20
## PA6 -0.11  0.05 -0.01 0.21 -0.12 0.20  1.00
## 
## Mean item complexity =  2.2
## Test of the hypothesis that 7 factors are sufficient.
## 
## The degrees of freedom for the null model are  3655  and the objective function was  47.62 with Chi Square of  215834.4
## The degrees of freedom for the model are 3074  and the objective function was  20.21 
## 
## The root mean square of the residuals (RMSR) is  0.04 
## The df corrected root mean square of the residuals is  0.04 
## 
## The harmonic number of observations is  4563 with the empirical chi square  53053.04  with prob <  0 
## The total number of observations was  4563  with Likelihood Chi Square =  91524.66  with prob <  0 
## 
## Tucker Lewis Index of factoring reliability =  0.504
## RMSEA index =  0.079  and the 95 % confidence intervals are  0.079 0.08
## BIC =  65623.94
## Fit based upon off diagonal values = 0.95
## Measures of factor score adequacy             
##                                                    PA1  PA3  PA4  PA2  PA7  PA5
## Correlation of (regression) scores with factors   0.98 0.99 0.97 0.98 0.94 0.91
## Multiple R square of scores with factors          0.96 0.98 0.94 0.95 0.88 0.83
## Minimum correlation of possible factor scores     0.93 0.96 0.88 0.91 0.76 0.67
##                                                    PA6
## Correlation of (regression) scores with factors   0.91
## Multiple R square of scores with factors          0.83
## Minimum correlation of possible factor scores     0.66
# Retain primary-loading indices for the seven-factor solution
# (same 0.3 / 0.15 cut-offs as the six-factor run).
# NOTE(review): this object is named "_reduced" while the PA6 analogue
# is "_refined" — consider one consistent suffix across solutions.
PA7_reduced <- MD.reduce(PA7, 0.3, 0.15)
paged_table(PA7_reduced)

# Number of indices retained after dropping rows with missing values.
dim(na.omit(PA7_reduced))
## [1] 54  3

# Per-text dimension scores for the seven-factor solution.
PA7_scores <- MD.scores(PA7, PA7_reduced, btr_filtered)
paged_table(PA7_scores)

2.4.1 Dimension score: text-type * mode * learning_environment

MD.visualize(PA7, PA7_reduced, btr_filtered, x = text_type, color = mode, grid = ~mode+learning_environment, legend = F)
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8235353   0.7244426
## contraction                                PA1  0.8186383   0.6431457
## cc_clause                                  PA1  0.7961354   0.6064587
## emphatics                                  PA1  0.7795884   0.5482211
## be_mv                                      PA1  0.7459612   0.5479498
## wrd_length                                 PA1 -0.7287759   0.8014540
## pp_demonstrative                           PA1  0.6948488   0.5562340
## nn_all                                     PA1 -0.6544929   0.8300732
## pp3_it                                     PA1  0.6386236   0.4259388
## pp1                                        PA1  0.6247616   0.4914034
## mean_verbal_deps                           PA1 -0.5596111   0.4105018
## nn_abstract                                PA1 -0.5310264   0.3650471
## nominalization                             PA1 -0.5275952   0.4631378
## factive_adverb                             PA1  0.5214767   0.3249478
## mattr                                      PA1 -0.5053301   0.2980419
## mlc                                        PA1 -0.4958467   0.6864842
## det_nominal                                PA1  0.4191834   0.3575159
## to_clause_noun                             PA1 -0.4157106   0.2123137
## prep_phrase                                PA1 -0.3286525   0.3639326
## adverbial_subordinator_causitive           PA1  0.3261532   0.1637591
## pp_indefinite                              PA1  0.3241266   0.1685381
## discourse_particle                         PA1  0.3163154   0.2265938
## time_adverbials                            PA1  0.3125315   0.2088412

## 
## 
##                         factor_loaded   loading communality
## relcl_c                           PA2 0.8461425   0.7192315
## wh_relative_clause                PA2 0.7474298   0.7072877
## wh_relative_subj_clause           PA2 0.6364631   0.5601752
## relcl_nominal                     PA2 0.6262247   0.7207488
## dc_c                              PA2 0.4661064   0.5837262
## that_relative_clause              PA2 0.3770284   0.3211368

## 
## 
##                       factor_loaded    loading communality
## verb                            PA3  0.8366189   0.8913202
## to_clause                       PA3  0.7636545   0.5577178
## non_past_tense                  PA3  0.7048001   0.8220477
## to_clause_verb                  PA3  0.5073746   0.4021488
## activity_verb                   PA3  0.4300264   0.3092743
## modal_possibility               PA3  0.4107499   0.1870198
## to_clause_verb_desire           PA3  0.4047185   0.2393614
## pp2                             PA3  0.3903002   0.3594773
## mental_verb                     PA3  0.3888151   0.3958738
## mean_nominal_deps               PA3 -0.3553521   0.2337824
## prep_nominal                    PA3 -0.3286380   0.2553694

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.9243763   0.7824317
## that_complement_clause             PA4 0.9231932   0.8495170
## that_verb_clause_factive           PA4 0.7799246   0.5533175
## ccomp_c                            PA4 0.5438557   0.4391453

## 
## 
##              factor_loaded   loading communality
## pp3                    PA5 0.6636445   0.4447154
## past_tense             PA5 0.5888479   0.4128245
## nn_animate             PA5 0.5617818   0.3306635
## poss_nominal           PA5 0.4224054   0.3220701

## 
## 
##            factor_loaded   loading communality
## cc_nominal           PA6 0.4683098   0.2792157
## cc_phrase            PA6 0.4272668   0.4114733

## 
## 
##                   factor_loaded    loading communality
## jj_attributive              PA7  0.6090223   0.5425127
## amod_nominal                PA7  0.5180351   0.2752260
## mltu                        PA7 -0.5138994   0.5736179
## agentless_passive           PA7  0.3684972   0.2165383

2.4.2 Dimension score: Discipline by mode

MD.visualize(PA7, PA7_reduced, btr_filtered, x = discipline, color = discipline, grid = mode~learning_environment, legend = F)
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8235353   0.7244426
## contraction                                PA1  0.8186383   0.6431457
## cc_clause                                  PA1  0.7961354   0.6064587
## emphatics                                  PA1  0.7795884   0.5482211
## be_mv                                      PA1  0.7459612   0.5479498
## wrd_length                                 PA1 -0.7287759   0.8014540
## pp_demonstrative                           PA1  0.6948488   0.5562340
## nn_all                                     PA1 -0.6544929   0.8300732
## pp3_it                                     PA1  0.6386236   0.4259388
## pp1                                        PA1  0.6247616   0.4914034
## mean_verbal_deps                           PA1 -0.5596111   0.4105018
## nn_abstract                                PA1 -0.5310264   0.3650471
## nominalization                             PA1 -0.5275952   0.4631378
## factive_adverb                             PA1  0.5214767   0.3249478
## mattr                                      PA1 -0.5053301   0.2980419
## mlc                                        PA1 -0.4958467   0.6864842
## det_nominal                                PA1  0.4191834   0.3575159
## to_clause_noun                             PA1 -0.4157106   0.2123137
## prep_phrase                                PA1 -0.3286525   0.3639326
## adverbial_subordinator_causitive           PA1  0.3261532   0.1637591
## pp_indefinite                              PA1  0.3241266   0.1685381
## discourse_particle                         PA1  0.3163154   0.2265938
## time_adverbials                            PA1  0.3125315   0.2088412

## 
## 
##                         factor_loaded   loading communality
## relcl_c                           PA2 0.8461425   0.7192315
## wh_relative_clause                PA2 0.7474298   0.7072877
## wh_relative_subj_clause           PA2 0.6364631   0.5601752
## relcl_nominal                     PA2 0.6262247   0.7207488
## dc_c                              PA2 0.4661064   0.5837262
## that_relative_clause              PA2 0.3770284   0.3211368

## 
## 
##                       factor_loaded    loading communality
## verb                            PA3  0.8366189   0.8913202
## to_clause                       PA3  0.7636545   0.5577178
## non_past_tense                  PA3  0.7048001   0.8220477
## to_clause_verb                  PA3  0.5073746   0.4021488
## activity_verb                   PA3  0.4300264   0.3092743
## modal_possibility               PA3  0.4107499   0.1870198
## to_clause_verb_desire           PA3  0.4047185   0.2393614
## pp2                             PA3  0.3903002   0.3594773
## mental_verb                     PA3  0.3888151   0.3958738
## mean_nominal_deps               PA3 -0.3553521   0.2337824
## prep_nominal                    PA3 -0.3286380   0.2553694

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.9243763   0.7824317
## that_complement_clause             PA4 0.9231932   0.8495170
## that_verb_clause_factive           PA4 0.7799246   0.5533175
## ccomp_c                            PA4 0.5438557   0.4391453

## 
## 
##              factor_loaded   loading communality
## pp3                    PA5 0.6636445   0.4447154
## past_tense             PA5 0.5888479   0.4128245
## nn_animate             PA5 0.5617818   0.3306635
## poss_nominal           PA5 0.4224054   0.3220701

## 
## 
##            factor_loaded   loading communality
## cc_nominal           PA6 0.4683098   0.2792157
## cc_phrase            PA6 0.4272668   0.4114733

## 
## 
##                   factor_loaded    loading communality
## jj_attributive              PA7  0.6090223   0.5425127
## amod_nominal                PA7  0.5180351   0.2752260
## mltu                        PA7 -0.5138994   0.5736179
## agentless_passive           PA7  0.3684972   0.2165383

2.5 Eight-factor solution (Heywood case produced)

New analysis: this solution produced Heywood cases. The 8-factor PA solution explained 35% of the total variance; the root mean square of the residuals (RMSR) is 0.04; the df-corrected root mean square of the residuals is 0.04; the Tucker–Lewis Index of factoring reliability = 0.521; the RMSEA index = 0.079, with 95% confidence interval [0.078, 0.079].

# Eight-factor EFA: principal axis factoring ("pa") with Promax rotation on
# the linguistic-index columns (n_col is defined earlier in the document).
# This solution produced a Heywood case (see the notes above), so the
# downstream sort / reduce / score steps remain disabled.
PA8 <- fa(
  btr_filtered[8:n_col],
  nfactors = 8, n.iter = 1, rotate = "Promax",
  residuals = TRUE, SMC = TRUE, missing = TRUE, impute = "median",
  min.err = 0.001, max.iter = 50, symmetric = TRUE, warnings = TRUE,
  fm = "pa", alpha = .05, p = .05, oblique.scores = TRUE
)
# Disabled follow-up (kept for reference; re-enable if the Heywood case is resolved):
#PA8 <- fa.sort(PA8, polar = FALSE)
#print(PA8)
#PA8_reduced <- MD.reduce(PA8, .3, .15)
#paged_table(PA8_reduced)
#PA8_reduced %>%
 # na.omit() %>%
  #dim()
#PA8_scores <- MD.scores(PA8, PA8_reduced, btr_filtered)
#paged_table(PA8_scores)

2.6 Summary of MDA

  • Biber et al. (2004) reported that their 4-factor solution accounted for 46.9% of the variance in the T2K-SWAL corpus.
  • However, in this new MDA, the scree plot continues to drop until the 6-factor solution and then starts to flatten out. This suggests that the four-factor and five-factor solutions may be underfactored.
  • When the scree plot is examined again, it suggests that we could extract 6 or 7 factors (see the plot reproduced below).
# Reproduce the scree plot (the `screeplot` object is created earlier in the
# document) to support the argument for extracting 6 or 7 factors.
plot(screeplot)

  • Increasing the factor does not appear to solve the issue of explained variance (see Table below).
# Compare cumulative variance explained across candidate solutions.
# Row 3 of psych::fa's $Vaccounted is the cumulative proportion; column k is
# the k-th (last) extracted factor of that solution.
cum_var <- function(model, k) model$Vaccounted[3, k]
data.frame(
  VarianceExplained = c(cum_var(PA6, 6), cum_var(PA7, 7), cum_var(PA8, 8)),
  row.names = c("6-factor", "7-factor", "8-factor")
)
  • It may therefore be the case that the corpus contains many small sources of variability and that the dimensions are more fine-grained (up to 7 factors would be possible with Promax rotation).
  • We concluded that 6-factor solution was optimal.

2.7 Dimension score: Discipline by mode

# Dimension-score visualization for the chosen 6-factor solution.
# NOTE(review): the reduced-loadings object is named PA6_refined here while
# other sections use the *_reduced suffix (e.g. PA7_reduced, PA8_reduced) —
# confirm this is the intended object and consider a consistent name.
MD.visualize(PA6, PA6_refined, btr_filtered, x = discipline, color = discipline, grid = mode~learning_environment, legend = F)
##                                  factor_loaded    loading communality
## nonfinite_prop                             PA1 -0.8132950   0.7253723
## cc_clause                                  PA1  0.7742338   0.5873482
## contraction                                PA1  0.7542997   0.6407286
## emphatics                                  PA1  0.7495712   0.5427764
## be_mv                                      PA1  0.7322963   0.5486265
## pp_demonstrative                           PA1  0.6335961   0.5565309
## nn_all                                     PA1 -0.6188822   0.8163238
## pp1                                        PA1  0.6131749   0.4810347
## pp3_it                                     PA1  0.5852639   0.4258781
## mean_verbal_deps                           PA1 -0.5684253   0.3699637
## factive_adverb                             PA1  0.5566128   0.3073590
## mlc                                        PA1 -0.4843049   0.5393716
## nn_abstract                                PA1 -0.4419376   0.3660817
## mltu                                       PA1 -0.4329341   0.3313769
## mattr                                      PA1 -0.4105833   0.2990983
## that_relative_clause                       PA1  0.3478179   0.3277435
## det_nominal                                PA1  0.3413118   0.3559768
## adverbial_subordinator_causitive           PA1  0.3082464   0.1632622

## 
## 
##                                    factor_loaded    loading communality
## wrd_length                                   PA2  0.5880763   0.7977080
## cc_phrase                                    PA2  0.5613025   0.4137876
## cc_nominal                                   PA2  0.5260028   0.2808598
## nominalization                               PA2  0.4980983   0.4664133
## jj_attributive                               PA2  0.4408125   0.4063362
## amod_nominal                                 PA2  0.3734195   0.1939523
## prep_phrase                                  PA2  0.3538703   0.3669912
## adverbial_subordinator_conditional           PA2 -0.3301145   0.1789314
## prep_nominal                                 PA2  0.3007542   0.2453117

## 
## 
##                       factor_loaded   loading communality
## to_clause                       PA3 0.8032894   0.5551529
## verb                            PA3 0.6946974   0.7949891
## non_past_tense                  PA3 0.6651300   0.7967250
## to_clause_verb                  PA3 0.5146971   0.3993298
## pp2                             PA3 0.5144921   0.3540182
## mental_verb                     PA3 0.5047000   0.3849005
## to_clause_verb_desire           PA3 0.4472352   0.2408542
## to_clause_noun                  PA3 0.4389121   0.2124316
## dc_c                            PA3 0.4125804   0.5075388
## activity_verb                   PA3 0.3906763   0.2960751
## poss_nominal                    PA3 0.3721704   0.2740686

## 
## 
##                          factor_loaded   loading communality
## that_verb_clause                   PA4 0.8855422   0.7373146
## that_complement_clause             PA4 0.8385487   0.7637723
## that_verb_clause_factive           PA4 0.7869121   0.5464035
## ccomp_c                            PA4 0.4685438   0.3955203

## 
## 
##            factor_loaded   loading communality
## pp3                  PA5 0.6110859   0.4314571
## past_tense           PA5 0.5767110   0.4043281
## nn_animate           PA5 0.4394147   0.2878991

## 
## 
##                         factor_loaded   loading communality
## wh_relative_clause                PA6 0.8238232   0.7104070
## relcl_c                           PA6 0.8071224   0.6962011
## wh_relative_subj_clause           PA6 0.7076527   0.5628490
## relcl_nominal                     PA6 0.5960646   0.7154814

3 Regression models — Differences by Mode.

RQ1) What are the linguistic features of technology-mediated learning environments that are encountered and produced?

RQ2) How (dis)similar are the linguistic features of technology-mediated learning environments and the academic registers represented by T2K-SWAL with regard to mode?

# Inspect the computed dimension scores, then summarise them separately for
# the tmle vs. traditional learning environments (psych::describeBy).
# NOTE(review): the bare `learning_environment` in `group =` appears to
# resolve via the earlier attach(btr_filtered) — verify; passing
# PA6_scores$learning_environment explicitly would be safer.
head(PA6_scores)
describeBy(PA6_scores, group = learning_environment)
## 
##  Descriptive statistics by group 
## group: tmle
##                       vars    n    mean      sd  median trimmed     mad    min
## filename*                1 4101 2051.00 1184.00 2051.00 2051.00 1519.66   1.00
## learning_environment*    2 4101    1.00    0.00    1.00    1.00    0.00   1.00
## mode*                    3 4101    1.46    0.50    1.00    1.44    0.00   1.00
## discipline*              4 4101    3.25    1.83    3.00    3.19    2.97   1.00
## subdiscipline*           5 4070  284.24  137.12  308.00  285.17  148.26   1.00
## text_type*               6 4101    4.52    1.61    5.00    4.59    0.00   1.00
## PA1                      7 4101   -0.68   11.12   -0.80   -0.51   12.73 -51.34
## PA3                      8 4101    0.02    6.65   -0.01   -0.06    6.40 -21.27
## PA4                      9 4101    0.04    3.49   -0.58   -0.36    2.99  -4.33
## PA2                     10 4101    0.17    5.29   -0.17    0.03    4.99 -23.30
## PA6                     11 4101    0.04    3.25   -0.32   -0.22    2.85  -4.56
## PA5                     12 4101   -0.07    2.23   -0.66   -0.37    1.94  -2.64
##                           max   range  skew kurtosis    se
## filename*             4101.00 4100.00  0.00    -1.20 18.49
## learning_environment*    1.00    0.00   NaN      NaN  0.00
## mode*                    2.00    1.00  0.18    -1.97  0.01
## discipline*              6.00    5.00  0.16    -1.36  0.03
## subdiscipline*         562.00  561.00 -0.07    -0.93  2.15
## text_type*               8.00    7.00 -0.54     0.55  0.03
## PA1                     31.68   83.02 -0.19    -0.22  0.17
## PA3                     34.01   55.28  0.20     0.60  0.10
## PA4                     30.39   34.72  1.57     4.64  0.05
## PA2                     23.90   47.21  0.22     0.67  0.08
## PA6                     31.52   36.08  1.53     7.07  0.05
## PA5                     14.02   16.66  1.36     2.32  0.03
## ------------------------------------------------------------ 
## group: traditional
##                       vars   n   mean     sd median trimmed    mad    min
## filename*                1 462 231.50 133.51 231.50  231.50 171.24   1.00
## learning_environment*    2 462   1.00   0.00   1.00    1.00   0.00   1.00
## mode*                    3 462   1.37   0.48   1.00    1.34   0.00   1.00
## discipline*              4 462   4.45   2.37   4.00    4.44   2.97   1.00
## subdiscipline*           5 462  37.22  22.65  38.00   36.96  32.62   1.00
## text_type*               6 462   5.87   2.76   5.00    5.94   2.97   1.00
## PA1                      7 462   6.01  11.80  10.12    6.48  11.48 -57.92
## PA3                      8 462  -0.19   4.96   0.18   -0.04   5.05 -13.32
## PA4                      9 462  -0.37   1.59  -0.38   -0.40   1.54  -3.86
## PA2                     10 462  -1.49   6.82  -3.47   -1.92   6.72 -13.05
## PA6                     11 462  -0.32   1.58  -0.47   -0.36   1.56  -4.56
## PA5                     12 462   0.63   1.85   0.37    0.47   1.89  -2.45
##                          max  range  skew kurtosis   se
## filename*             462.00 461.00  0.00    -1.21 6.21
## learning_environment*   1.00   0.00   NaN      NaN 0.00
## mode*                   2.00   1.00  0.53    -1.73 0.02
## discipline*             8.00   7.00  0.14    -1.15 0.11
## subdiscipline*         75.00  74.00  0.05    -1.31 1.05
## text_type*             10.00   9.00  0.08    -0.89 0.13
## PA1                    27.63  85.55 -0.62     0.32 0.55
## PA3                    15.23  28.55 -0.22    -0.33 0.23
## PA4                     4.32   8.17  0.19    -0.25 0.07
## PA2                    15.62  28.67  0.53    -0.76 0.32
## PA6                     6.17  10.72  0.37     0.45 0.07
## PA5                     9.29  11.75  0.92     1.27 0.09

3.1 Data wrangling

3.1.1 Transforming the dataset to long format

# "classroom_management_talk" occurs in BOTH learning environments, so tag it
# with its environment before it is used as a factor level in the models.
# Replaces the original row-by-row for-loop with vectorized logical
# subsetting; which() additionally drops any NA positions, where the old
# scalar `if` would have errored.
data_lengths <- dim(PA6_scores)  # kept for compatibility with later code

PA6_scores2 <- PA6_scores

is_cmt <- PA6_scores2$text_type == "classroom_management_talk"
PA6_scores2$text_type[which(is_cmt & PA6_scores2$learning_environment == "tmle")] <-
  "classroom_management_talk_tmle"
PA6_scores2$text_type[which(is_cmt & PA6_scores2$learning_environment == "traditional")] <-
  "classroom_management_talk_trad"

# Sanity check: the relabelling should yield distinct
# classroom_management_talk_tmle / _trad categories (18 text types total).
unique(PA6_scores2$text_type)
##  [1] "instructional_reading"          "announcements_discussions"     
##  [3] "assignment_description"         "instructional_video"           
##  [5] "slides"                         "syllabus"                      
##  [7] "quiz"                           "classroom_management_talk_tmle"
##  [9] "textbooks"                      "course_packs"                  
## [11] "course_management"              "other_institutional_writing"   
## [13] "lecture"                        "service_encounter"             
## [15] "study_group"                    "lab"                           
## [17] "classroom_management_talk_trad" "office_hours"
# NOTE(review): attach() puts the data frame's columns on the search path,
# shadowing same-named columns from the earlier attach(btr_filtered); this is
# generally discouraged — prefer explicit df$col, with(), or data = arguments.
attach(PA6_scores2)
head(PA6_scores2) 

3.1.2 Setting the reference category

# Re-level text_type so that "textbooks" (first level after rev()) is the
# reference category under default treatment contrasts — see the contrast
# matrix printed below, where the "textbooks" row is all zeros.
# FIX: the original called factor() twice in a row; the first call's result
# was immediately overwritten, so a single call with explicit levels suffices.
text_type_order <- rev(c(
  "classroom_management_talk_tmle",
  "instructional_video",
  "classroom_management_talk_trad",
  "lab",
  "lecture",
  "office_hours",
  "service_encounter",
  "study_group",
  "announcements_discussions",
  "assignment_description",
  "instructional_reading",
  "quiz",
  "slides",
  "syllabus",
  "course_management",
  "course_packs",
  "other_institutional_writing",
  "textbooks"
))
PA6_scores2$text_type <- factor(PA6_scores2$text_type, levels = text_type_order)

# Show the treatment-contrast coding for text_type; the all-zero row
# ("textbooks") is the reference category.
contrasts(PA6_scores2$text_type)
##                                other_institutional_writing course_packs
## textbooks                                                0            0
## other_institutional_writing                              1            0
## course_packs                                             0            1
## course_management                                        0            0
## syllabus                                                 0            0
## slides                                                   0            0
## quiz                                                     0            0
## instructional_reading                                    0            0
## assignment_description                                   0            0
## announcements_discussions                                0            0
## study_group                                              0            0
## service_encounter                                        0            0
## office_hours                                             0            0
## lecture                                                  0            0
## lab                                                      0            0
## classroom_management_talk_trad                           0            0
## instructional_video                                      0            0
## classroom_management_talk_tmle                           0            0
##                                course_management syllabus slides quiz
## textbooks                                      0        0      0    0
## other_institutional_writing                    0        0      0    0
## course_packs                                   0        0      0    0
## course_management                              1        0      0    0
## syllabus                                       0        1      0    0
## slides                                         0        0      1    0
## quiz                                           0        0      0    1
## instructional_reading                          0        0      0    0
## assignment_description                         0        0      0    0
## announcements_discussions                      0        0      0    0
## study_group                                    0        0      0    0
## service_encounter                              0        0      0    0
## office_hours                                   0        0      0    0
## lecture                                        0        0      0    0
## lab                                            0        0      0    0
## classroom_management_talk_trad                 0        0      0    0
## instructional_video                            0        0      0    0
## classroom_management_talk_tmle                 0        0      0    0
##                                instructional_reading assignment_description
## textbooks                                          0                      0
## other_institutional_writing                        0                      0
## course_packs                                       0                      0
## course_management                                  0                      0
## syllabus                                           0                      0
## slides                                             0                      0
## quiz                                               0                      0
## instructional_reading                              1                      0
## assignment_description                             0                      1
## announcements_discussions                          0                      0
## study_group                                        0                      0
## service_encounter                                  0                      0
## office_hours                                       0                      0
## lecture                                            0                      0
## lab                                                0                      0
## classroom_management_talk_trad                     0                      0
## instructional_video                                0                      0
## classroom_management_talk_tmle                     0                      0
##                                announcements_discussions study_group
## textbooks                                              0           0
## other_institutional_writing                            0           0
## course_packs                                           0           0
## course_management                                      0           0
## syllabus                                               0           0
## slides                                                 0           0
## quiz                                                   0           0
## instructional_reading                                  0           0
## assignment_description                                 0           0
## announcements_discussions                              1           0
## study_group                                            0           1
## service_encounter                                      0           0
## office_hours                                           0           0
## lecture                                                0           0
## lab                                                    0           0
## classroom_management_talk_trad                         0           0
## instructional_video                                    0           0
## classroom_management_talk_tmle                         0           0
##                                service_encounter office_hours lecture lab
## textbooks                                      0            0       0   0
## other_institutional_writing                    0            0       0   0
## course_packs                                   0            0       0   0
## course_management                              0            0       0   0
## syllabus                                       0            0       0   0
## slides                                         0            0       0   0
## quiz                                           0            0       0   0
## instructional_reading                          0            0       0   0
## assignment_description                         0            0       0   0
## announcements_discussions                      0            0       0   0
## study_group                                    0            0       0   0
## service_encounter                              1            0       0   0
## office_hours                                   0            1       0   0
## lecture                                        0            0       1   0
## lab                                            0            0       0   1
## classroom_management_talk_trad                 0            0       0   0
## instructional_video                            0            0       0   0
## classroom_management_talk_tmle                 0            0       0   0
##                                classroom_management_talk_trad
## textbooks                                                   0
## other_institutional_writing                                 0
## course_packs                                                0
## course_management                                           0
## syllabus                                                    0
## slides                                                      0
## quiz                                                        0
## instructional_reading                                       0
## assignment_description                                      0
## announcements_discussions                                   0
## study_group                                                 0
## service_encounter                                           0
## office_hours                                                0
## lecture                                                     0
## lab                                                         0
## classroom_management_talk_trad                              1
## instructional_video                                         0
## classroom_management_talk_tmle                              0
##                                instructional_video
## textbooks                                        0
## other_institutional_writing                      0
## course_packs                                     0
## course_management                                0
## syllabus                                         0
## slides                                           0
## quiz                                             0
## instructional_reading                            0
## assignment_description                           0
## announcements_discussions                        0
## study_group                                      0
## service_encounter                                0
## office_hours                                     0
## lecture                                          0
## lab                                              0
## classroom_management_talk_trad                   0
## instructional_video                              1
## classroom_management_talk_tmle                   0
##                                classroom_management_talk_tmle
## textbooks                                                   0
## other_institutional_writing                                 0
## course_packs                                                0
## course_management                                           0
## syllabus                                                    0
## slides                                                      0
## quiz                                                        0
## instructional_reading                                       0
## assignment_description                                      0
## announcements_discussions                                   0
## study_group                                                 0
## service_encounter                                           0
## office_hours                                                0
## lecture                                                     0
## lab                                                         0
## classroom_management_talk_trad                              0
## instructional_video                                         0
## classroom_management_talk_tmle                              1
head(PA6_scores2)

3.1.3 Making the data longer

# Reshape the dimension scores from wide (one column per factor) to long format.
# BUG FIX: reshape2::melt() for data frames takes `id.vars`, not `ids`; the
# original `ids =` argument was silently swallowed by `...`, and melt fell back
# to auto-detecting the id columns. Passing `id.vars` makes the intent explicit
# and robust to column-type changes.
dim_score_long <- reshape2::melt(
  PA6_scores2,
  id.vars = c("filename", "learning_environment", "mode",
              "discipline", "subdiscipline", "text_type"),
  value.name = "Dimensional_score",
  variable.name = "Dimension"
)
head(dim_score_long)

# Relabel factor scores (PA*) as dimensions (DIM*). gsub() coerces the factor
# to character, so re-factor with an explicit level order afterwards.
dim_score_long$Dimension <- gsub("PA", "DIM", dim_score_long$Dimension)

dim_score_long$Dimension <- factor(dim_score_long$Dimension,
                                   levels = paste0("DIM", 1:6))
contrasts(dim_score_long$Dimension)
##      DIM2 DIM3 DIM4 DIM5 DIM6
## DIM1    0    0    0    0    0
## DIM2    1    0    0    0    0
## DIM3    0    1    0    0    0
## DIM4    0    0    1    0    0
## DIM5    0    0    0    1    0
## DIM6    0    0    0    0    1
# Confirm the text_type contrasts carry over unchanged into the long-format
# data (same matrix as printed above; "textbooks" remains the reference).
contrasts(dim_score_long$text_type)
##                                other_institutional_writing course_packs
## textbooks                                                0            0
## other_institutional_writing                              1            0
## course_packs                                             0            1
## course_management                                        0            0
## syllabus                                                 0            0
## slides                                                   0            0
## quiz                                                     0            0
## instructional_reading                                    0            0
## assignment_description                                   0            0
## announcements_discussions                                0            0
## study_group                                              0            0
## service_encounter                                        0            0
## office_hours                                             0            0
## lecture                                                  0            0
## lab                                                      0            0
## classroom_management_talk_trad                           0            0
## instructional_video                                      0            0
## classroom_management_talk_tmle                           0            0
##                                course_management syllabus slides quiz
## textbooks                                      0        0      0    0
## other_institutional_writing                    0        0      0    0
## course_packs                                   0        0      0    0
## course_management                              1        0      0    0
## syllabus                                       0        1      0    0
## slides                                         0        0      1    0
## quiz                                           0        0      0    1
## instructional_reading                          0        0      0    0
## assignment_description                         0        0      0    0
## announcements_discussions                      0        0      0    0
## study_group                                    0        0      0    0
## service_encounter                              0        0      0    0
## office_hours                                   0        0      0    0
## lecture                                        0        0      0    0
## lab                                            0        0      0    0
## classroom_management_talk_trad                 0        0      0    0
## instructional_video                            0        0      0    0
## classroom_management_talk_tmle                 0        0      0    0
##                                instructional_reading assignment_description
## textbooks                                          0                      0
## other_institutional_writing                        0                      0
## course_packs                                       0                      0
## course_management                                  0                      0
## syllabus                                           0                      0
## slides                                             0                      0
## quiz                                               0                      0
## instructional_reading                              1                      0
## assignment_description                             0                      1
## announcements_discussions                          0                      0
## study_group                                        0                      0
## service_encounter                                  0                      0
## office_hours                                       0                      0
## lecture                                            0                      0
## lab                                                0                      0
## classroom_management_talk_trad                     0                      0
## instructional_video                                0                      0
## classroom_management_talk_tmle                     0                      0
##                                announcements_discussions study_group
## textbooks                                              0           0
## other_institutional_writing                            0           0
## course_packs                                           0           0
## course_management                                      0           0
## syllabus                                               0           0
## slides                                                 0           0
## quiz                                                   0           0
## instructional_reading                                  0           0
## assignment_description                                 0           0
## announcements_discussions                              1           0
## study_group                                            0           1
## service_encounter                                      0           0
## office_hours                                           0           0
## lecture                                                0           0
## lab                                                    0           0
## classroom_management_talk_trad                         0           0
## instructional_video                                    0           0
## classroom_management_talk_tmle                         0           0
##                                service_encounter office_hours lecture lab
## textbooks                                      0            0       0   0
## other_institutional_writing                    0            0       0   0
## course_packs                                   0            0       0   0
## course_management                              0            0       0   0
## syllabus                                       0            0       0   0
## slides                                         0            0       0   0
## quiz                                           0            0       0   0
## instructional_reading                          0            0       0   0
## assignment_description                         0            0       0   0
## announcements_discussions                      0            0       0   0
## study_group                                    0            0       0   0
## service_encounter                              1            0       0   0
## office_hours                                   0            1       0   0
## lecture                                        0            0       1   0
## lab                                            0            0       0   1
## classroom_management_talk_trad                 0            0       0   0
## instructional_video                            0            0       0   0
## classroom_management_talk_tmle                 0            0       0   0
##                                classroom_management_talk_trad
## textbooks                                                   0
## other_institutional_writing                                 0
## course_packs                                                0
## course_management                                           0
## syllabus                                                    0
## slides                                                      0
## quiz                                                        0
## instructional_reading                                       0
## assignment_description                                      0
## announcements_discussions                                   0
## study_group                                                 0
## service_encounter                                           0
## office_hours                                                0
## lecture                                                     0
## lab                                                         0
## classroom_management_talk_trad                              1
## instructional_video                                         0
## classroom_management_talk_tmle                              0
##                                instructional_video
## textbooks                                        0
## other_institutional_writing                      0
## course_packs                                     0
## course_management                                0
## syllabus                                         0
## slides                                           0
## quiz                                             0
## instructional_reading                            0
## assignment_description                           0
## announcements_discussions                        0
## study_group                                      0
## service_encounter                                0
## office_hours                                     0
## lecture                                          0
## lab                                              0
## classroom_management_talk_trad                   0
## instructional_video                              1
## classroom_management_talk_tmle                   0
##                                classroom_management_talk_tmle
## textbooks                                                   0
## other_institutional_writing                                 0
## course_packs                                                0
## course_management                                           0
## syllabus                                                    0
## slides                                                      0
## quiz                                                        0
## instructional_reading                                       0
## assignment_description                                      0
## announcements_discussions                                   0
## study_group                                                 0
## service_encounter                                           0
## office_hours                                                0
## lecture                                                     0
## lab                                                         0
## classroom_management_talk_trad                              0
## instructional_video                                         0
## classroom_management_talk_tmle                              1
# Sanity check: cross-tabulate text_type by learning_environment to confirm
# that each text type occurs in exactly one environment (the counts above
# and below bear this out).
# NOTE(review): text_type / learning_environment are resolved from the
# attached search path here -- an explicit data argument would be safer;
# confirm which data frame is attached at this point.
xtabs(~text_type+learning_environment)
##                                 learning_environment
## text_type                        tmle traditional
##   announcements_discussions       326           0
##   assignment_description          355           0
##   classroom_management_talk_tmle   26           0
##   classroom_management_talk_trad    0          38
##   course_management                 0          21
##   course_packs                      0          27
##   instructional_reading           624           0
##   instructional_video            2207           0
##   lab                               0          17
##   lecture                           0         177
##   office_hours                      0          11
##   other_institutional_writing       0          37
##   quiz                            246           0
##   service_encounter                 0          22
##   slides                          138           0
##   study_group                       0          25
##   syllabus                        179           0
##   textbooks                         0          87
# Put dim_score_long's columns on the search path for bare-variable
# references below.
# NOTE(review): attach() is generally discouraged (masking/ambiguity risk);
# the lmer() calls below already pass data = dim_score_long explicitly, so
# this attach mainly serves formula calls like the xtabs() here. Left in
# place to preserve the document's behavior.
attach(dim_score_long)
# Observation counts per discipline x learning environment (shown below);
# note the empty tmle cells for "other" and "service_encounters".
xtabs(~discipline+learning_environment)
##                     learning_environment
## discipline           tmle traditional
##   business           6732         420
##   education          2796         246
##   engineering        4224         360
##   humanities         3510         450
##   natural_science    3018         444
##   other                 0         180
##   service_encounters    0         132
##   social_science     4326         540

3.2 General modeling strategy

3.2.1 Fixed-effects

We are interested in examining the differences among Dimension, learning environment, and one of the three situational variables (here, mode). One open question is whether to center the binary variable of mode: centering would make the main effects of learning_environment independent of mode. This may not affect the results much if we rely on emmeans(), since it calculates the estimated marginal means across all factor levels.

  1. Maximal model (Barr et al., 2013)
  • Random intercepts, slopes, and their interactions, when the model converges. This maximal model will not converge, but it is important to examine it to guard against Type I error.

Model (a) includes all the parameters: (1+dummy(Dimension, “PA1”)|filename) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)|discipline:mode) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)|text_type)

  2. Uncorrelated random intercepts and slopes

    (1+dummy(Dimension, “PA1”)||filename) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||discipline:mode) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||text_type)

b.1 excludes the intercepts for filename (0+dummy(Dimension, “PA1”)||filename) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||discipline:mode) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||text_type)

  3. No random slopes
  • c.1 removes slope for filename (1|filename) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||discipline:mode) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||text_type)

  • c.2 removes slope for discipline:mode (1+dummy(Dimension, “PA1”)||filename) + (1|discipline:mode) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||text_type)

  • c.3 removes slope for text_type (1+dummy(Dimension, “PA1”)||filename) + (1+dummy(Dimension,“PA1”)+dummy(learning_environment,“tmle”)||discipline:mode) + (1|text_type)

  • c.12, c.23, and c.123 are also possible

3.3 Null model

# Null model: Dimension as the only fixed effect, with a (near-)maximal
# random-effects structure -- a by-filename random slope for DIM1, and the
# full three-way dummy(Dimension) x dummy(learning_environment) x
# dummy(mode) random-slope interactions by discipline and by text_type.
# Fitted by ML (REML = F) so it can be compared against models that differ
# in their fixed effects.
# NOTE(review): the summary below reports "boundary (singular) fit", i.e.
# this maximal structure is over-parameterized for these data (consistent
# with the Barr et al. 2013 discussion above).
m0 <- lmer(Dimensional_score ~ Dimension +  
        (1+dummy(Dimension, "DIM1")|filename) + 
        (1+dummy(Dimension,"DIM1")*dummy(learning_environment,"tmle")*dummy(mode,"spoken")|discipline) + 
        (1+dummy(Dimension,"DIM1")*dummy(learning_environment,"tmle")*dummy(mode,"spoken")|text_type), 
        REML = F,data = dim_score_long)
summary(m0)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dimensional_score ~ Dimension + (1 + dummy(Dimension, "DIM1") |  
##     filename) + (1 + dummy(Dimension, "DIM1") * dummy(learning_environment,  
##     "tmle") * dummy(mode, "spoken") | discipline) + (1 + dummy(Dimension,  
##     "DIM1") * dummy(learning_environment, "tmle") * dummy(mode,  
##     "spoken") | text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162651.2 163325.1 -81243.6 162487.2    27296 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.9840 -0.4946 -0.0689  0.4272  7.8612 
## 
## Random effects:
##  Groups    
##  filename  
##            
##  text_type 
##            
##            
##            
##            
##            
##            
##            
##  discipline
##            
##            
##            
##            
##            
##            
##            
##  Residual  
##  Name                                                                              
##  (Intercept)                                                                       
##  dummy(Dimension, "DIM1")                                                          
##  (Intercept)                                                                       
##  dummy(Dimension, "DIM1")                                                          
##  dummy(learning_environment, "tmle")                                               
##  dummy(mode, "spoken")                                                             
##  dummy(Dimension, "DIM1"):dummy(learning_environment, "tmle")                      
##  dummy(Dimension, "DIM1"):dummy(mode, "spoken")                                    
##  dummy(learning_environment, "tmle"):dummy(mode, "spoken")                         
##  dummy(Dimension, "DIM1"):dummy(learning_environment, "tmle"):dummy(mode, "spoken")
##  (Intercept)                                                                       
##  dummy(Dimension, "DIM1")                                                          
##  dummy(learning_environment, "tmle")                                               
##  dummy(mode, "spoken")                                                             
##  dummy(Dimension, "DIM1"):dummy(learning_environment, "tmle")                      
##  dummy(Dimension, "DIM1"):dummy(mode, "spoken")                                    
##  dummy(learning_environment, "tmle"):dummy(mode, "spoken")                         
##  dummy(Dimension, "DIM1"):dummy(learning_environment, "tmle"):dummy(mode, "spoken")
##                                                                                    
##  Variance  Std.Dev. Corr                                     
##    0.04891  0.2212                                           
##   28.85071  5.3713  1.00                                     
##    0.04508  0.2123                                           
##    4.56662  2.1370   0.99                                    
##    1.20074  1.0958   0.37  0.27                              
##    0.74900  0.8654  -0.01 -0.13  0.81                        
##    6.86991  2.6211  -0.32 -0.42  0.59  0.94                  
##  663.03025 25.7494  -0.51 -0.40 -0.85 -0.84 -0.63            
##    5.69904  2.3873  -0.36 -0.24 -0.96 -0.91 -0.72  0.95      
##  103.06716 10.1522   0.53  0.43  0.84  0.83  0.62 -0.99 -0.94
##    0.43437  0.6591                                           
##    0.57163  0.7561  -0.46                                    
##    0.05899  0.2429  -0.40 -0.06                              
##    0.08712  0.2952  -0.55  0.47  0.85                        
##    0.26730  0.5170  -0.41  0.49  0.73  0.92                  
##    1.09916  1.0484  -0.39 -0.57  0.20 -0.17 -0.40            
##    0.21819  0.4671   0.45 -0.15 -0.95 -0.92 -0.90  0.02      
##    5.70962  2.3895  -0.49  0.29 -0.24 -0.07  0.14  0.02  0.02
##   18.65987  4.3197                                           
## Number of obs: 27378, groups:  filename, 4563; text_type, 18; discipline, 8
## 
## Fixed effects:
##               Estimate Std. Error      df t value Pr(>|t|)    
## (Intercept)    -7.7081     0.7177 13.0201  -10.74 7.70e-08 ***
## DimensionDIM2   7.7343     0.7061 12.7773   10.95 7.32e-08 ***
## DimensionDIM3   7.7343     0.7061 12.7773   10.95 7.32e-08 ***
## DimensionDIM4   7.7343     0.7061 12.7773   10.95 7.32e-08 ***
## DimensionDIM5   7.7343     0.7061 12.7773   10.95 7.32e-08 ***
## DimensionDIM6   7.7343     0.7061 12.7773   10.95 7.32e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5
## DimensnDIM2 -0.943                            
## DimensnDIM3 -0.943  0.992                     
## DimensnDIM4 -0.943  0.992  0.992              
## DimensnDIM5 -0.943  0.992  0.992  0.992       
## DimensnDIM6 -0.943  0.992  0.992  0.992  0.992
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular

3.3.1 Baseline

# Baseline model (a): Dimension x learning_environment fixed effects, with
# correlated random intercepts and slopes -- a DIM1 slope by filename, DIM1
# and tmle slopes by discipline:mode, and a DIM1 slope by text_type.
# Fitted by ML (REML = FALSE) for likelihood-based model comparison.
m1.a <- lmer(
  Dimensional_score ~ Dimension * learning_environment +
    (1 + dummy(Dimension, "DIM1") | filename) +
    (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") | discipline:mode) +
    (1 + dummy(Dimension, "DIM1") | text_type),
  data = dim_score_long,
  REML = FALSE
)

# Variant of m1.a with the by-filename random intercept removed
# (0 + dummy(...) | filename), keeping only the DIM1 slope for filename.
# This converged, although with a singular fit (see "boundary (singular)
# fit" in the summary output below).
m1.a1 <- lmer(Dimensional_score ~ Dimension * learning_environment +  
        (0+dummy(Dimension, "DIM1")|filename) + 
        (1+dummy(Dimension,"DIM1")+dummy(learning_environment,"tmle")|discipline:mode)+ 
        (1+dummy(Dimension,"DIM1")|text_type), REML = F,data = dim_score_long)


summary(m1.a1)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.6107       
##  text_type       (Intercept)                           0.2831  0.5320       
##                  dummy(Dimension, "DIM1")             10.1393  3.1842  0.23 
##  discipline:mode (Intercept)                           0.6495  0.8059       
##                  dummy(Dimension, "DIM1")            105.8455 10.2881  -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.2808  -0.69
##  Residual                                             18.6768  4.3217       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2030 16.3295
## DimensionDIM2                                   1.1509     3.0229 16.7200
## DimensionDIM3                                   1.0048     3.0229 16.7200
## DimensionDIM4                                   1.0248     3.0229 16.7200
## DimensionDIM5                                   0.9128     3.0229 16.7200
## DimensionDIM6                                   1.0192     3.0229 16.7200
## learning_environmenttraditional                 5.8605     1.8337 14.6221
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7326 13.1189
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7326 13.1189
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7326 13.1189
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7326 13.1189
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7326 13.1189
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70724   
## DimensionDIM2                                   0.381  0.70820   
## DimensionDIM3                                   0.332  0.74371   
## DimensionDIM4                                   0.339  0.73881   
## DimensionDIM5                                   0.302  0.76640   
## DimensionDIM6                                   0.337  0.74019   
## learning_environmenttraditional                 3.196  0.00618 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.367  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.481  0.00401 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01374 * 
## DimensionDIM6:learning_environmenttraditional  -3.449  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Removing correlations between random intercepts and slopes.
# NOTE(review): the formula below still uses "|" (correlated terms), not
# "||" (uncorrelated), so m1.b.1 is identical to m1.a1 above -- its echoed
# summary later in the document indeed reports correlation estimates.
# Confirm whether "||" was intended here.
m1.b.1 <- lmer(Dimensional_score ~ Dimension * learning_environment +  
        (0+dummy(Dimension, "DIM1")|filename) + 
        (1+dummy(Dimension,"DIM1")+dummy(learning_environment,"tmle")|discipline:mode)+ 
        (1+dummy(Dimension,"DIM1")|text_type), REML = F,data = dim_score_long)

# Does not converge cleanly, probably because there are no observations for
# service encounters in tmle => not justified by the design.
# NOTE(review): this call is byte-identical to m1.b.1 above (its summary
# output matches m1.b.1's exactly); the intended b.12 variant with "||"
# terms appears not to have been written. Confirm.
m1.b.12 <- lmer(Dimensional_score ~ Dimension * learning_environment +  
        (0+dummy(Dimension, "DIM1")|filename) + 
        (1+dummy(Dimension,"DIM1")+dummy(learning_environment,"tmle")|discipline:mode)+ 
        (1+dummy(Dimension,"DIM1")|text_type), REML = F,data = dim_score_long)



# Model b.13: uncorrelated ("||") intercept and DIM1 slope for text_type;
# filename and discipline:mode terms unchanged (still correlated, "|").
m1.b.13 <- lmer(Dimensional_score ~ Dimension * learning_environment +  
        (0+dummy(Dimension, "DIM1")|filename) + 
        (1+dummy(Dimension,"DIM1")+dummy(learning_environment,"tmle")|discipline:mode)+ 
        (1+dummy(Dimension,"DIM1")||text_type), REML = F,data = dim_score_long)


# NOTE(review): m1.b.12 was fitted with the same formula as m1.b.1, so this
# summary duplicates m1.b.1's results.
summary(m1.b.12)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.6107       
##  text_type       (Intercept)                           0.2831  0.5320       
##                  dummy(Dimension, "DIM1")             10.1393  3.1842  0.23 
##  discipline:mode (Intercept)                           0.6495  0.8059       
##                  dummy(Dimension, "DIM1")            105.8455 10.2881  -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.2808  -0.69
##  Residual                                             18.6768  4.3217       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2030 16.3295
## DimensionDIM2                                   1.1509     3.0229 16.7200
## DimensionDIM3                                   1.0048     3.0229 16.7200
## DimensionDIM4                                   1.0248     3.0229 16.7200
## DimensionDIM5                                   0.9128     3.0229 16.7200
## DimensionDIM6                                   1.0192     3.0229 16.7200
## learning_environmenttraditional                 5.8605     1.8337 14.6221
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7326 13.1189
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7326 13.1189
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7326 13.1189
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7326 13.1189
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7326 13.1189
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70724   
## DimensionDIM2                                   0.381  0.70820   
## DimensionDIM3                                   0.332  0.74371   
## DimensionDIM4                                   0.339  0.73881   
## DimensionDIM5                                   0.302  0.76640   
## DimensionDIM6                                   0.337  0.74019   
## learning_environmenttraditional                 3.196  0.00618 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.367  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.481  0.00401 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01374 * 
## DimensionDIM6:learning_environmenttraditional  -3.449  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Note the separate text_type and text_type.1 variance components in the
# random-effects table below -- these are produced by the "||"
# (uncorrelated) text_type term in m1.b.13.
summary(m1.b.13)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") ||  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162597.9 162778.7 -81277.0 162553.9    27356 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0955 -0.4945 -0.0683  0.4298  7.9335 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4816  5.6108       
##  text_type       dummy(Dimension, "DIM1")             10.4733  3.2362       
##  text_type.1     (Intercept)                           0.2896  0.5381       
##  discipline.mode (Intercept)                           0.5751  0.7584       
##                  dummy(Dimension, "DIM1")            105.8684 10.2892  -0.77
##                  dummy(learning_environment, "tmle")   1.2623  1.1235  -0.64
##  Residual                                             18.6763  4.3216       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2872     3.1750 16.4425
## DimensionDIM2                                   1.1762     3.0330 16.8584
## DimensionDIM3                                   1.0301     3.0330 16.8584
## DimensionDIM4                                   1.0501     3.0330 16.8584
## DimensionDIM5                                   0.9381     3.0330 16.8584
## DimensionDIM6                                   1.0445     3.0330 16.8584
## learning_environmenttraditional                 5.9302     1.8039 14.3435
## DimensionDIM2:learning_environmenttraditional  -7.3325     1.7641 13.3625
## DimensionDIM3:learning_environmenttraditional  -5.8898     1.7641 13.3625
## DimensionDIM4:learning_environmenttraditional  -6.0873     1.7641 13.3625
## DimensionDIM5:learning_environmenttraditional  -4.9812     1.7641 13.3625
## DimensionDIM6:learning_environmenttraditional  -6.0318     1.7641 13.3625
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.405  0.69039   
## DimensionDIM2                                   0.388  0.70302   
## DimensionDIM3                                   0.340  0.73833   
## DimensionDIM4                                   0.346  0.73346   
## DimensionDIM5                                   0.309  0.76088   
## DimensionDIM6                                   0.344  0.73482   
## learning_environmenttraditional                 3.287  0.00525 **
## DimensionDIM2:learning_environmenttraditional  -4.156  0.00107 **
## DimensionDIM3:learning_environmenttraditional  -3.339  0.00516 **
## DimensionDIM4:learning_environmenttraditional  -3.451  0.00415 **
## DimensionDIM5:learning_environmenttraditional  -2.824  0.01405 * 
## DimensionDIM6:learning_environmenttraditional  -3.419  0.00441 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.463  0.463  0.463  0.463  0.463  0.463                     
## DmnsnDIM2:_  0.309 -0.322 -0.321 -0.321 -0.321 -0.321 -0.966              
## DmnsnDIM3:_  0.309 -0.321 -0.322 -0.321 -0.321 -0.321 -0.966  0.986       
## DmnsnDIM4:_  0.309 -0.321 -0.321 -0.322 -0.321 -0.321 -0.966  0.986  0.986
## DmnsnDIM5:_  0.309 -0.321 -0.321 -0.321 -0.322 -0.321 -0.966  0.986  0.986
## DmnsnDIM6:_  0.309 -0.321 -0.321 -0.321 -0.321 -0.322 -0.966  0.986  0.986
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.986       
## DmnsnDIM6:_  0.986  0.986
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Reduced model m1.c.1: keep only a random intercept for filename (dropping its
# DIM1 random slope) and remove the random-effect correlations (`||` instead of
# `|`) for the discipline:mode and text_type grouping factors.
# Fit by ML (REML = FALSE) so that models differing in structure remain
# comparable via likelihood-based criteria (AIC/BIC/LRT).
m1.c.1 <- lmer(Dimensional_score ~ Dimension * learning_environment +
        (1 | filename) +
        (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") || discipline:mode) +
        (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") || text_type),
        REML = FALSE, data = dim_score_long)


# Reduced model m1.c.2: random DIM1 slope only (no intercept) for filename,
# random intercept only for discipline:mode, and uncorrelated (`||`) random
# intercept + slopes for text_type. Fit by ML (REML = FALSE) for model
# comparison.
# NOTE(review): the original code fit this identical model twice in a row,
# assigning to m1.c.2 both times; the redundant second fit has been removed
# (it only wasted computation and produced the same object).
m1.c.2 <- lmer(Dimensional_score ~ Dimension * learning_environment +
        (0 + dummy(Dimension, "DIM1") | filename) +
        (1 | discipline:mode) +
        (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") || text_type),
        REML = FALSE, data = dim_score_long)

# Summary of model m1.b.1 (fit earlier in the file); the echoed output follows.
summary(m1.b.1)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.6107       
##  text_type       (Intercept)                           0.2831  0.5320       
##                  dummy(Dimension, "DIM1")             10.1393  3.1842  0.23 
##  discipline:mode (Intercept)                           0.6495  0.8059       
##                  dummy(Dimension, "DIM1")            105.8455 10.2881  -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.2808  -0.69
##  Residual                                             18.6768  4.3217       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2030 16.3295
## DimensionDIM2                                   1.1509     3.0229 16.7200
## DimensionDIM3                                   1.0048     3.0229 16.7200
## DimensionDIM4                                   1.0248     3.0229 16.7200
## DimensionDIM5                                   0.9128     3.0229 16.7200
## DimensionDIM6                                   1.0192     3.0229 16.7200
## learning_environmenttraditional                 5.8605     1.8337 14.6221
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7326 13.1189
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7326 13.1189
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7326 13.1189
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7326 13.1189
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7326 13.1189
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70724   
## DimensionDIM2                                   0.381  0.70820   
## DimensionDIM3                                   0.332  0.74371   
## DimensionDIM4                                   0.339  0.73881   
## DimensionDIM5                                   0.302  0.76640   
## DimensionDIM6                                   0.337  0.74019   
## learning_environmenttraditional                 3.196  0.00618 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.367  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.481  0.00401 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01374 * 
## DimensionDIM6:learning_environmenttraditional  -3.449  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# NOTE(review): `m1.b.12` is not defined anywhere in the visible code, and the
# echoed output below is identical to summary(m1.b.1) above -- this looks like
# a typo for `m1.b.2` (or an accidental duplicate call); confirm which model
# was intended.
summary(m1.b.12)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.6107       
##  text_type       (Intercept)                           0.2831  0.5320       
##                  dummy(Dimension, "DIM1")             10.1393  3.1842  0.23 
##  discipline:mode (Intercept)                           0.6495  0.8059       
##                  dummy(Dimension, "DIM1")            105.8455 10.2881  -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.2808  -0.69
##  Residual                                             18.6768  4.3217       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2030 16.3295
## DimensionDIM2                                   1.1509     3.0229 16.7200
## DimensionDIM3                                   1.0048     3.0229 16.7200
## DimensionDIM4                                   1.0248     3.0229 16.7200
## DimensionDIM5                                   0.9128     3.0229 16.7200
## DimensionDIM6                                   1.0192     3.0229 16.7200
## learning_environmenttraditional                 5.8605     1.8337 14.6221
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7326 13.1189
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7326 13.1189
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7326 13.1189
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7326 13.1189
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7326 13.1189
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70724   
## DimensionDIM2                                   0.381  0.70820   
## DimensionDIM3                                   0.332  0.74371   
## DimensionDIM4                                   0.339  0.73881   
## DimensionDIM5                                   0.302  0.76640   
## DimensionDIM6                                   0.337  0.74019   
## learning_environmenttraditional                 3.196  0.00618 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.367  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.481  0.00401 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01374 * 
## DimensionDIM6:learning_environmenttraditional  -3.449  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Summary of the reduced model m1.c.1 (filename intercept only, uncorrelated
# random effects); the echoed output follows.
summary(m1.c.1)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (1 | filename) +  
##     (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##         "tmle") || discipline:mode) + (1 + dummy(Dimension, "DIM1") +  
##     dummy(learning_environment, "tmle") || text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 164861.7 165026.0 -82410.8 164821.7    27358 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -9.1477 -0.4914 -0.0696  0.4624  7.3391 
## 
## Random effects:
##  Groups            Name                                Variance  Std.Dev.
##  filename          (Intercept)                           0.00000  0.0000 
##  text_type         dummy(learning_environment, "tmle")   0.96152  0.9806 
##  text_type.1       dummy(Dimension, "DIM1")             11.49391  3.3903 
##  text_type.2       (Intercept)                           0.02982  0.1727 
##  discipline.mode   dummy(learning_environment, "tmle")   0.00117  0.0342 
##  discipline.mode.1 dummy(Dimension, "DIM1")            100.34395 10.0172 
##  discipline.mode.2 (Intercept)                           0.44560  0.6675 
##  Residual                                               23.90589  4.8894 
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error     df
## (Intercept)                                     -1.782      3.006 17.782
## DimensionDIM2                                    1.253      2.982 17.227
## DimensionDIM3                                    1.107      2.982 17.227
## DimensionDIM4                                    1.127      2.982 17.227
## DimensionDIM5                                    1.015      2.982 17.227
## DimensionDIM6                                    1.121      2.982 17.227
## learning_environmenttraditional                  6.519      1.812 12.786
## DimensionDIM2:learning_environmenttraditional   -7.474      1.795 12.394
## DimensionDIM3:learning_environmenttraditional   -6.031      1.795 12.394
## DimensionDIM4:learning_environmenttraditional   -6.229      1.795 12.394
## DimensionDIM5:learning_environmenttraditional   -5.123      1.795 12.394
## DimensionDIM6:learning_environmenttraditional   -6.173      1.795 12.394
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.593  0.56062   
## DimensionDIM2                                   0.420  0.67956   
## DimensionDIM3                                   0.371  0.71503   
## DimensionDIM4                                   0.378  0.71013   
## DimensionDIM5                                   0.340  0.73773   
## DimensionDIM6                                   0.376  0.71151   
## learning_environmenttraditional                 3.598  0.00333 **
## DimensionDIM2:learning_environmenttraditional  -4.164  0.00123 **
## DimensionDIM3:learning_environmenttraditional  -3.360  0.00544 **
## DimensionDIM4:learning_environmenttraditional  -3.470  0.00442 **
## DimensionDIM5:learning_environmenttraditional  -2.854  0.01411 * 
## DimensionDIM6:learning_environmenttraditional  -3.439  0.00469 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.991                                                        
## DimensnDIM3 -0.991  0.999                                                 
## DimensnDIM4 -0.991  0.999  0.999                                          
## DimensnDIM5 -0.991  0.999  0.999  0.999                                   
## DimensnDIM6 -0.991  0.999  0.999  0.999  0.999                            
## lrnng_nvrnm -0.353  0.332  0.332  0.332  0.332  0.332                     
## DmnsnDIM2:_  0.333 -0.338 -0.337 -0.337 -0.337 -0.337 -0.970              
## DmnsnDIM3:_  0.333 -0.337 -0.338 -0.337 -0.337 -0.337 -0.970  0.982       
## DmnsnDIM4:_  0.333 -0.337 -0.337 -0.338 -0.337 -0.337 -0.970  0.982  0.982
## DmnsnDIM5:_  0.333 -0.337 -0.337 -0.337 -0.338 -0.337 -0.970  0.982  0.982
## DmnsnDIM6:_  0.333 -0.337 -0.337 -0.337 -0.337 -0.338 -0.970  0.982  0.982
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.982       
## DmnsnDIM6:_  0.982  0.982
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Summary of the reduced model m1.c.2 (discipline:mode intercept only,
# uncorrelated random effects for text_type); the echoed output follows.
summary(m1.c.2)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 | discipline:mode) + (1 + dummy(Dimension,  
##     "DIM1") + dummy(learning_environment, "tmle") || text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162889.4 163037.3 -81426.7 162853.4    27360 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.1100 -0.4967 -0.0680  0.4286  7.9313 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev.
##  filename        dummy(Dimension, "DIM1")             34.9884  5.9151 
##  text_type       dummy(learning_environment, "tmle")   0.8787  0.9374 
##  text_type.1     dummy(Dimension, "DIM1")            109.4684 10.4627 
##  text_type.2     (Intercept)                           0.1291  0.3592 
##  discipline.mode (Intercept)                           0.3344  0.5783 
##  Residual                                             18.6961  4.3239 
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                     -6.621      3.727  17.886
## DimensionDIM2                                    6.156      3.707  17.510
## DimensionDIM3                                    6.009      3.707  17.510
## DimensionDIM4                                    6.029      3.707  17.510
## DimensionDIM5                                    5.917      3.707  17.510
## DimensionDIM6                                    6.024      3.707  17.510
## learning_environmenttraditional                 13.397      5.004  17.935
## DimensionDIM2:learning_environmenttraditional  -14.478      4.994  17.799
## DimensionDIM3:learning_environmenttraditional  -13.035      4.994  17.799
## DimensionDIM4:learning_environmenttraditional  -13.232      4.994  17.799
## DimensionDIM5:learning_environmenttraditional  -12.126      4.994  17.799
## DimensionDIM6:learning_environmenttraditional  -13.177      4.994  17.799
##                                               t value Pr(>|t|)   
## (Intercept)                                    -1.776  0.09267 . 
## DimensionDIM2                                   1.660  0.11463   
## DimensionDIM3                                   1.621  0.12288   
## DimensionDIM4                                   1.626  0.12172   
## DimensionDIM5                                   1.596  0.12833   
## DimensionDIM6                                   1.625  0.12205   
## learning_environmenttraditional                 2.677  0.01541 * 
## DimensionDIM2:learning_environmenttraditional  -2.899  0.00965 **
## DimensionDIM3:learning_environmenttraditional  -2.610  0.01783 * 
## DimensionDIM4:learning_environmenttraditional  -2.650  0.01641 * 
## DimensionDIM5:learning_environmenttraditional  -2.428  0.02602 * 
## DimensionDIM6:learning_environmenttraditional  -2.638  0.01680 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.994                                                        
## DimensnDIM3 -0.994  1.000                                                 
## DimensnDIM4 -0.994  1.000  1.000                                          
## DimensnDIM5 -0.994  1.000  1.000  1.000                                   
## DimensnDIM6 -0.994  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.744  0.740  0.740  0.740  0.740  0.740                     
## DmnsnDIM2:_  0.738 -0.742 -0.742 -0.742 -0.742 -0.742 -0.996              
## DmnsnDIM3:_  0.738 -0.742 -0.742 -0.742 -0.742 -0.742 -0.996  0.998       
## DmnsnDIM4:_  0.738 -0.742 -0.742 -0.742 -0.742 -0.742 -0.996  0.998  0.998
## DmnsnDIM5:_  0.738 -0.742 -0.742 -0.742 -0.742 -0.742 -0.996  0.998  0.998
## DmnsnDIM6:_  0.738 -0.742 -0.742 -0.742 -0.742 -0.742 -0.996  0.998  0.998
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.998       
## DmnsnDIM6:_  0.998  0.998
# Post-hoc comparison: estimated marginal means and pairwise
# tmle-vs-traditional contrasts within each Dimension, based on model m1.a1
# (fit earlier in the file).
# NOTE(review): with only two learning_environment levels there is a single
# contrast per Dimension, so the "tukey" adjustment is effectively a no-op
# within each `by` group -- confirm the intended multiplicity correction.
contrastm2 <- emmeans(m1.a1, pairwise ~ learning_environment | Dimension, adjust = "tukey")
contrastm2
## $emmeans
## Dimension = DIM1:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -1.2242 3.203 Inf    -7.502     5.054
##  traditional           4.6364 2.823 Inf    -0.897    10.170
## 
## Dimension = DIM2:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -0.0733 0.339 Inf    -0.737     0.591
##  traditional          -1.4897 0.347 Inf    -2.169    -0.810
## 
## Dimension = DIM3:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -0.2193 0.339 Inf    -0.883     0.445
##  traditional          -0.1930 0.347 Inf    -0.873     0.487
## 
## Dimension = DIM4:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -0.1993 0.339 Inf    -0.863     0.465
##  traditional          -0.3705 0.347 Inf    -1.050     0.309
## 
## Dimension = DIM5:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -0.3113 0.339 Inf    -0.975     0.353
##  traditional           0.6236 0.347 Inf    -0.056     1.303
## 
## Dimension = DIM6:
##  learning_environment  emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 -0.2050 0.339 Inf    -0.869     0.459
##  traditional          -0.3206 0.347 Inf    -1.000     0.359
## 
## Degrees-of-freedom method: asymptotic 
## Confidence level used: 0.95 
## 
## $contrasts
## Dimension = DIM1:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional  -5.8605 1.834 Inf -3.196  0.0014 
## 
## Dimension = DIM2:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional   1.4164 0.497 Inf  2.850  0.0044 
## 
## Dimension = DIM3:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional  -0.0263 0.497 Inf -0.053  0.9577 
## 
## Dimension = DIM4:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional   0.1712 0.497 Inf  0.344  0.7305 
## 
## Dimension = DIM5:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional  -0.9349 0.497 Inf -1.881  0.0600 
## 
## Dimension = DIM6:
##  contrast           estimate    SE  df z.ratio p.value
##  tmle - traditional   0.1157 0.497 Inf  0.233  0.8160 
## 
## Degrees-of-freedom method: asymptotic
# Standardized effect sizes (Cohen's-d style) for the contrasts above, using
# the model's residual SD as the standardizer; edf = Inf matches the
# asymptotic degrees-of-freedom method used by the emmeans call.
emmeans::eff_size(contrastm2, sigma = sigma(m1.a1), edf = Inf)
## Dimension = DIM1:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)    -1.35608 0.424 Inf    -2.188   -0.5244
## 
## Dimension = DIM2:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)     0.32774 0.115 Inf     0.102    0.5532
## 
## Dimension = DIM3:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)    -0.00609 0.115 Inf    -0.232    0.2193
## 
## Dimension = DIM4:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)     0.03961 0.115 Inf    -0.186    0.2650
## 
## Dimension = DIM5:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)    -0.21633 0.115 Inf    -0.442    0.0091
## 
## Dimension = DIM6:
##  contrast             effect.size    SE  df asymp.LCL asymp.UCL
##  (tmle - traditional)     0.02677 0.115 Inf    -0.199    0.2522
## 
## sigma used for effect sizes: 4.322 
## Degrees-of-freedom method: inherited from asymptotic when re-gridding 
## Confidence level used: 0.95
# Interaction plot of estimated marginal means for each Dimension by learning
# environment, with confidence intervals.
# (Fixed: use TRUE rather than the reassignable shorthand T.)
emmip(m1.a1, learning_environment ~ Dimension, CIs = TRUE, adjust = "tukey")

3.4 Mode comparisons

3.4.1 Model building

# Baseline model for the mode comparisons: random DIM1 slope (no intercept)
# for filename, correlated random intercept + DIM1 slope + tmle slope for
# discipline:mode, and correlated random intercept + DIM1 slope for text_type.
# Fit by ML (REML = FALSE). This converged, although with a singular fit
# (see the "boundary (singular) fit" note in the echoed output below).
mode_1.baseline <- lmer(Dimensional_score ~ Dimension * learning_environment +
        (0 + dummy(Dimension, "DIM1") | filename) +
        (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") | discipline:mode) +
        (1 + dummy(Dimension, "DIM1") | text_type),
        REML = FALSE, data = dim_score_long)
summary(mode_1.baseline)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.6107       
##  text_type       (Intercept)                           0.2831  0.5320       
##                  dummy(Dimension, "DIM1")             10.1393  3.1842  0.23 
##  discipline:mode (Intercept)                           0.6495  0.8059       
##                  dummy(Dimension, "DIM1")            105.8455 10.2881  -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.2808  -0.69
##  Residual                                             18.6768  4.3217       
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2030 16.3295
## DimensionDIM2                                   1.1509     3.0229 16.7200
## DimensionDIM3                                   1.0048     3.0229 16.7200
## DimensionDIM4                                   1.0248     3.0229 16.7200
## DimensionDIM5                                   0.9128     3.0229 16.7200
## DimensionDIM6                                   1.0192     3.0229 16.7200
## learning_environmenttraditional                 5.8605     1.8337 14.6221
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7326 13.1189
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7326 13.1189
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7326 13.1189
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7326 13.1189
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7326 13.1189
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70724   
## DimensionDIM2                                   0.381  0.70820   
## DimensionDIM3                                   0.332  0.74371   
## DimensionDIM4                                   0.339  0.73881   
## DimensionDIM5                                   0.302  0.76640   
## DimensionDIM6                                   0.337  0.74019   
## learning_environmenttraditional                 3.196  0.00618 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.367  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.481  0.00401 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01374 * 
## DimensionDIM6:learning_environmenttraditional  -3.449  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# Refit the baseline mode-comparison model with the bobyqa optimizer as a
# sensitivity check; the refit overwrites mode_1.baseline, and the echoed
# output below shows estimates essentially identical to the default
# (nloptwrap) fit above.
# (Fixed: the lmerControl() argument was previously passed positionally; it is
# now named `control =` explicitly, and REML = F is spelled REML = FALSE.)
mode_1.baseline <- lmer(Dimensional_score ~ Dimension * learning_environment +
        (0 + dummy(Dimension, "DIM1") | filename) +
        (1 + dummy(Dimension, "DIM1") + dummy(learning_environment, "tmle") | discipline:mode) +
        (1 + dummy(Dimension, "DIM1") | text_type),
        REML = FALSE, data = dim_score_long,
        control = lmerControl(optimizer = "bobyqa"))
summary(mode_1.baseline)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: 
## Dimensional_score ~ Dimension * learning_environment + (0 + dummy(Dimension,  
##     "DIM1") | filename) + (1 + dummy(Dimension, "DIM1") + dummy(learning_environment,  
##     "tmle") | discipline:mode) + (1 + dummy(Dimension, "DIM1") |  
##     text_type)
##    Data: dim_score_long
## Control: lmerControl(optimizer = "bobyqa")
## 
##      AIC      BIC   logLik deviance df.resid 
## 162599.6 162788.6 -81276.8 162553.6    27355 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.0953 -0.4947 -0.0683  0.4300  7.9327 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")             31.4796  5.611        
##  text_type       (Intercept)                           0.2831  0.532        
##                  dummy(Dimension, "DIM1")             10.1377  3.184   0.23 
##  discipline:mode (Intercept)                           0.6496  0.806        
##                  dummy(Dimension, "DIM1")            105.8412 10.288   -0.81
##                  dummy(learning_environment, "tmle")   1.6405  1.281   -0.69
##  Residual                                             18.6768  4.322        
##       
##       
##       
##       
##       
##       
##   0.98
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                               Estimate Std. Error      df
## (Intercept)                                    -1.2242     3.2029 16.3296
## DimensionDIM2                                   1.1509     3.0228 16.7200
## DimensionDIM3                                   1.0048     3.0228 16.7200
## DimensionDIM4                                   1.0248     3.0228 16.7200
## DimensionDIM5                                   0.9129     3.0228 16.7200
## DimensionDIM6                                   1.0192     3.0228 16.7200
## learning_environmenttraditional                 5.8606     1.8336 14.6245
## DimensionDIM2:learning_environmenttraditional  -7.2769     1.7325 13.1213
## DimensionDIM3:learning_environmenttraditional  -5.8342     1.7325 13.1213
## DimensionDIM4:learning_environmenttraditional  -6.0317     1.7325 13.1213
## DimensionDIM5:learning_environmenttraditional  -4.9256     1.7325 13.1213
## DimensionDIM6:learning_environmenttraditional  -5.9762     1.7325 13.1213
##                                               t value Pr(>|t|)   
## (Intercept)                                    -0.382  0.70723   
## DimensionDIM2                                   0.381  0.70819   
## DimensionDIM3                                   0.332  0.74370   
## DimensionDIM4                                   0.339  0.73880   
## DimensionDIM5                                   0.302  0.76639   
## DimensionDIM6                                   0.337  0.74018   
## learning_environmenttraditional                 3.196  0.00617 **
## DimensionDIM2:learning_environmenttraditional  -4.200  0.00102 **
## DimensionDIM3:learning_environmenttraditional  -3.368  0.00499 **
## DimensionDIM4:learning_environmenttraditional  -3.482  0.00400 **
## DimensionDIM5:learning_environmenttraditional  -2.843  0.01373 * 
## DimensionDIM6:learning_environmenttraditional  -3.450  0.00426 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) DmDIM2 DmDIM3 DmDIM4 DmDIM5 DmDIM6 lrnng_ DDIM2: DDIM3:
## DimensnDIM2 -0.996                                                        
## DimensnDIM3 -0.996  1.000                                                 
## DimensnDIM4 -0.996  1.000  1.000                                          
## DimensnDIM5 -0.996  1.000  1.000  1.000                                   
## DimensnDIM6 -0.996  1.000  1.000  1.000  1.000                            
## lrnng_nvrnm -0.481  0.476  0.476  0.476  0.476  0.476                     
## DmnsnDIM2:_  0.310 -0.316 -0.315 -0.315 -0.315 -0.315 -0.963              
## DmnsnDIM3:_  0.310 -0.315 -0.316 -0.315 -0.315 -0.315 -0.963  0.985       
## DmnsnDIM4:_  0.310 -0.315 -0.315 -0.316 -0.315 -0.315 -0.963  0.985  0.985
## DmnsnDIM5:_  0.310 -0.315 -0.315 -0.315 -0.316 -0.315 -0.963  0.985  0.985
## DmnsnDIM6:_  0.310 -0.315 -0.315 -0.315 -0.315 -0.316 -0.963  0.985  0.985
##             DDIM4: DDIM5:
## DimensnDIM2              
## DimensnDIM3              
## DimensnDIM4              
## DimensnDIM5              
## DimensnDIM6              
## lrnng_nvrnm              
## DmnsnDIM2:_              
## DmnsnDIM3:_              
## DmnsnDIM4:_              
## DmnsnDIM5:_  0.985       
## DmnsnDIM6:_  0.985  0.985
## optimizer (bobyqa) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular
# NOTE: this model converged, although the fit is singular (see ?isSingular).
# Extended model for the mode comparison: fixed effects are the full
# Dimension x learning_environment x mode interaction. Random effects:
#   - by-filename slope for the DIM1 indicator (no random intercept)
#   - by-discipline:mode intercept plus DIM1 and tmle slopes
#   - by-text_type intercept plus DIM1 slope
# Fit with ML (REML = FALSE) so it is comparable to the baseline model via
# anova / AIC / BIC below. (Use FALSE, not F: F is a reassignable binding.)
mode_1.a1 <- lmer(
  Dimensional_score ~ Dimension * learning_environment * mode +
    (0 + dummy(Dimension, "DIM1") | filename) +
    (1 + dummy(Dimension, "DIM1") +
       dummy(learning_environment, "tmle") | discipline:mode) +
    (1 + dummy(Dimension, "DIM1") | text_type),
  REML = FALSE,
  data = dim_score_long
)
summary(mode_1.a1)
## Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
##   method [lmerModLmerTest]
## Formula: Dimensional_score ~ Dimension * learning_environment * mode +  
##     (0 + dummy(Dimension, "DIM1") | filename) + (1 + dummy(Dimension,  
##     "DIM1") + dummy(learning_environment, "tmle") | discipline:mode) +  
##     (1 + dummy(Dimension, "DIM1") | text_type)
##    Data: dim_score_long
## 
##      AIC      BIC   logLik deviance df.resid 
## 159756.0 160043.6 -79843.0 159686.0    27343 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.9791 -0.4889 -0.0665  0.4131  8.8047 
## 
## Random effects:
##  Groups          Name                                Variance Std.Dev. Corr 
##  filename        dummy(Dimension, "DIM1")            33.63377 5.7995        
##  text_type       (Intercept)                          0.27146 0.5210        
##                  dummy(Dimension, "DIM1")             4.21919 2.0541   0.26 
##  discipline:mode (Intercept)                          0.34614 0.5883        
##                  dummy(Dimension, "DIM1")             4.35269 2.0863   -0.80
##                  dummy(learning_environment, "tmle")  0.01168 0.1081    0.25
##  Residual                                            16.51413 4.0638        
##       
##       
##       
##       
##       
##       
##   0.37
##       
## Number of obs: 27378, groups:  
## filename, 4563; text_type, 18; discipline:mode, 14
## 
## Fixed effects:
##                                                           Estimate Std. Error
## (Intercept)                                                  5.902      1.841
## DimensionDIM2                                               -7.147      1.788
## DimensionDIM3                                               -4.154      1.788
## DimensionDIM4                                               -4.771      1.788
## DimensionDIM5                                               -5.673      1.788
## DimensionDIM6                                               -5.132      1.788
## learning_environmenttraditional                             10.299      2.037
## modewritten                                                -16.263      2.173
## DimensionDIM2:learning_environmenttraditional              -15.287      1.917
## DimensionDIM3:learning_environmenttraditional               -9.700      1.917
## DimensionDIM4:learning_environmenttraditional              -11.542      1.917
## DimensionDIM5:learning_environmenttraditional              -10.044      1.917
## DimensionDIM6:learning_environmenttraditional              -11.746      1.917
## DimensionDIM2:modewritten                                   19.071      2.147
## DimensionDIM3:modewritten                                   12.180      2.147
## DimensionDIM4:modewritten                                   13.577      2.147
## DimensionDIM5:modewritten                                   15.313      2.147
## DimensionDIM6:modewritten                                   14.357      2.147
## learning_environmenttraditional:modewritten                 -8.254      2.594
## DimensionDIM2:learning_environmenttraditional:modewritten   17.839      2.456
## DimensionDIM3:learning_environmenttraditional:modewritten    5.167      2.456
## DimensionDIM4:learning_environmenttraditional:modewritten    9.894      2.456
## DimensionDIM5:learning_environmenttraditional:modewritten    9.232      2.456
## DimensionDIM6:learning_environmenttraditional:modewritten   10.766      2.456
##                                                                df t value
## (Intercept)                                                21.332   3.205
## DimensionDIM2                                              22.124  -3.996
## DimensionDIM3                                              22.124  -2.323
## DimensionDIM4                                              22.124  -2.667
## DimensionDIM5                                              22.124  -3.172
## DimensionDIM6                                              22.124  -2.869
## learning_environmenttraditional                            18.260   5.055
## modewritten                                                22.707  -7.483
## DimensionDIM2:learning_environmenttraditional              17.164  -7.973
## DimensionDIM3:learning_environmenttraditional              17.164  -5.059
## DimensionDIM4:learning_environmenttraditional              17.164  -6.019
## DimensionDIM5:learning_environmenttraditional              17.164  -5.238
## DimensionDIM6:learning_environmenttraditional              17.164  -6.126
## DimensionDIM2:modewritten                                  24.139   8.881
## DimensionDIM3:modewritten                                  24.139   5.672
## DimensionDIM4:modewritten                                  24.139   6.323
## DimensionDIM5:modewritten                                  24.139   7.131
## DimensionDIM6:modewritten                                  24.139   6.686
## learning_environmenttraditional:modewritten                18.495  -3.181
## DimensionDIM2:learning_environmenttraditional:modewritten  17.934   7.263
## DimensionDIM3:learning_environmenttraditional:modewritten  17.934   2.104
## DimensionDIM4:learning_environmenttraditional:modewritten  17.934   4.028
## DimensionDIM5:learning_environmenttraditional:modewritten  17.934   3.759
## DimensionDIM6:learning_environmenttraditional:modewritten  17.934   4.383
##                                                           Pr(>|t|)    
## (Intercept)                                               0.004192 ** 
## DimensionDIM2                                             0.000604 ***
## DimensionDIM3                                             0.029771 *  
## DimensionDIM4                                             0.014029 *  
## DimensionDIM5                                             0.004392 ** 
## DimensionDIM6                                             0.008881 ** 
## learning_environmenttraditional                           7.90e-05 ***
## modewritten                                               1.44e-07 ***
## DimensionDIM2:learning_environmenttraditional             3.58e-07 ***
## DimensionDIM3:learning_environmenttraditional             9.41e-05 ***
## DimensionDIM4:learning_environmenttraditional             1.33e-05 ***
## DimensionDIM5:learning_environmenttraditional             6.47e-05 ***
## DimensionDIM6:learning_environmenttraditional             1.07e-05 ***
## DimensionDIM2:modewritten                                 4.50e-09 ***
## DimensionDIM3:modewritten                                 7.51e-06 ***
## DimensionDIM4:modewritten                                 1.51e-06 ***
## DimensionDIM5:modewritten                                 2.19e-07 ***
## DimensionDIM6:modewritten                                 6.28e-07 ***
## learning_environmenttraditional:modewritten               0.005040 ** 
## DimensionDIM2:learning_environmenttraditional:modewritten 9.65e-07 ***
## DimensionDIM3:learning_environmenttraditional:modewritten 0.049798 *  
## DimensionDIM4:learning_environmenttraditional:modewritten 0.000793 ***
## DimensionDIM5:learning_environmenttraditional:modewritten 0.001446 ** 
## DimensionDIM6:learning_environmenttraditional:modewritten 0.000361 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see ?isSingular

3.4.2 Model comparison

# Likelihood-ratio comparison of the two ML-fit models (presumably the
# baseline omits mode and its interactions — confirm against its definition).
anova(mode_1.baseline, mode_1.a1)

3.4.3 Delta BIC and AIC

# Information-criterion deltas (extended model minus baseline).
# Negative values favor the extended model, mode_1.a1.
BIC(mode_1.a1) - BIC(mode_1.baseline)
## [1] -2745.014
AIC(mode_1.a1) - AIC(mode_1.baseline)
## [1] -2843.624

3.4.4 R squares

# Pseudo-R^2 for the mixed model: R2m is the marginal R^2 (fixed effects
# only); R2c is the conditional R^2 (fixed + random effects).
MuMIn::r.squaredGLMM(mode_1.a1)
##            R2m       R2c
## [1,] 0.3934584 0.5825638

3.4.5 Post-hoc comparison

# Estimated marginal means for every learning_environment x mode cell,
# together with Tukey-adjusted pairwise contrasts, computed separately
# within each level of Dimension.
mm_mode <- emmeans(
  mode_1.a1,
  pairwise ~ learning_environment * mode | Dimension,
  adjust = "Tukey"
)
mm_mode
## $emmeans
## Dimension = DIM1:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken    5.9020 1.841 Inf     2.293    9.5109
##  traditional          spoken   16.2006 1.249 Inf    13.752   18.6492
##  tmle                 written -10.3613 1.155 Inf   -12.624   -8.0983
##  traditional          written  -8.3162 1.423 Inf   -11.105   -5.5273
## 
## Dimension = DIM2:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken   -1.2449 0.477 Inf    -2.180   -0.3095
##  traditional          spoken   -6.2335 0.402 Inf    -7.021   -5.4459
##  tmle                 written   1.5624 0.338 Inf     0.899    2.2254
##  traditional          written   6.1589 0.468 Inf     5.242    7.0754
## 
## Dimension = DIM3:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken    1.7476 0.477 Inf     0.812    2.6830
##  traditional          spoken    2.3461 0.402 Inf     1.558    3.1337
##  tmle                 written  -2.3354 0.338 Inf    -2.998   -1.6724
##  traditional          written  -4.8239 0.468 Inf    -5.740   -3.9073
## 
## Dimension = DIM4:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken    1.1314 0.477 Inf     0.196    2.0668
##  traditional          spoken   -0.1116 0.402 Inf    -0.899    0.6761
##  tmle                 written  -1.5549 0.338 Inf    -2.218   -0.8919
##  traditional          written  -1.1570 0.468 Inf    -2.074   -0.2405
## 
## Dimension = DIM5:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken    0.2288 0.477 Inf    -0.707    1.1642
##  traditional          spoken    0.4831 0.402 Inf    -0.305    1.2707
##  tmle                 written  -0.7218 0.338 Inf    -1.385   -0.0588
##  traditional          written   0.5106 0.468 Inf    -0.406    1.4271
## 
## Dimension = DIM6:
##  learning_environment mode      emmean    SE  df asymp.LCL asymp.UCL
##  tmle                 spoken    0.7705 0.477 Inf    -0.165    1.7059
##  traditional          spoken   -0.6767 0.402 Inf    -1.464    0.1110
##  tmle                 written  -1.1358 0.338 Inf    -1.799   -0.4728
##  traditional          written  -0.0702 0.468 Inf    -0.987    0.8463
## 
## Degrees-of-freedom method: asymptotic 
## Confidence level used: 0.95 
## 
## $contrasts
## Dimension = DIM1:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken         -10.2986 2.037 Inf  -5.055 <.0001 
##  tmle spoken - tmle written                16.2633 2.173 Inf   7.483 <.0001 
##  tmle spoken - traditional written         14.2182 2.327 Inf   6.110 <.0001 
##  traditional spoken - tmle written         26.5619 1.701 Inf  15.614 <.0001 
##  traditional spoken - traditional written  24.5168 1.894 Inf  12.947 <.0001 
##  tmle written - traditional written        -2.0451 1.606 Inf  -1.273 0.5801 
## 
## Dimension = DIM2:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken           4.9887 0.535 Inf   9.320 <.0001 
##  tmle spoken - tmle written                -2.8073 0.585 Inf  -4.799 <.0001 
##  tmle spoken - traditional written         -7.4038 0.668 Inf -11.081 <.0001 
##  traditional spoken - tmle written         -7.7960 0.525 Inf -14.841 <.0001 
##  traditional spoken - traditional written -12.3924 0.617 Inf -20.099 <.0001 
##  tmle written - traditional written        -4.5965 0.482 Inf  -9.529 <.0001 
## 
## Dimension = DIM3:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken          -0.5985 0.535 Inf  -1.118 0.6783 
##  tmle spoken - tmle written                 4.0830 0.585 Inf   6.980 <.0001 
##  tmle spoken - traditional written          6.5714 0.668 Inf   9.835 <.0001 
##  traditional spoken - tmle written          4.6815 0.525 Inf   8.912 <.0001 
##  traditional spoken - traditional written   7.1699 0.617 Inf  11.629 <.0001 
##  tmle written - traditional written         2.4885 0.482 Inf   5.159 <.0001 
## 
## Dimension = DIM4:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken           1.2429 0.535 Inf   2.322 0.0930 
##  tmle spoken - tmle written                 2.6862 0.585 Inf   4.592 <.0001 
##  tmle spoken - traditional written          2.2884 0.668 Inf   3.425 0.0034 
##  traditional spoken - tmle written          1.4433 0.525 Inf   2.748 0.0306 
##  traditional spoken - traditional written   1.0454 0.617 Inf   1.696 0.3260 
##  tmle written - traditional written        -0.3979 0.482 Inf  -0.825 0.8427 
## 
## Dimension = DIM5:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken          -0.2543 0.535 Inf  -0.475 0.9646 
##  tmle spoken - tmle written                 0.9506 0.585 Inf   1.625 0.3644 
##  tmle spoken - traditional written         -0.2818 0.668 Inf  -0.422 0.9748 
##  traditional spoken - tmle written          1.2049 0.525 Inf   2.294 0.0994 
##  traditional spoken - traditional written  -0.0276 0.617 Inf  -0.045 1.0000 
##  tmle written - traditional written        -1.2324 0.482 Inf  -2.555 0.0519 
## 
## Dimension = DIM6:
##  contrast                                 estimate    SE  df z.ratio p.value
##  tmle spoken - traditional spoken           1.4472 0.535 Inf   2.704 0.0346 
##  tmle spoken - tmle written                 1.9063 0.585 Inf   3.259 0.0062 
##  tmle spoken - traditional written          0.8407 0.668 Inf   1.258 0.5897 
##  traditional spoken - tmle written          0.4591 0.525 Inf   0.874 0.8183 
##  traditional spoken - traditional written  -0.6065 0.617 Inf  -0.984 0.7588 
##  tmle written - traditional written        -1.0656 0.482 Inf  -2.209 0.1207 
## 
## Degrees-of-freedom method: asymptotic 
## P value adjustment: tukey method for comparing a family of 4 estimates

3.4.6 Effect sizes

# Standardized effect sizes (Cohen's-d-like) for the pairwise contrasts,
# scaled by the model's residual SD; edf = Inf matches the asymptotic
# degrees-of-freedom method used for the emmeans above.
residual_sd <- sigma(mode_1.a1)
emmeans::eff_size(mm_mode, sigma = residual_sd, edf = Inf)
## Dimension = DIM1:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)            -2.53425 0.501 Inf   -3.5168
##  (tmle spoken - tmle written)                   4.00203 0.535 Inf    2.9538
##  (tmle spoken - traditional written)            3.49879 0.573 Inf    2.3764
##  (traditional spoken - tmle written)            6.53629 0.419 Inf    5.7158
##  (traditional spoken - traditional written)     6.03304 0.466 Inf    5.1198
##  (tmle written - traditional written)          -0.50324 0.395 Inf   -1.2780
##  asymp.UCL
##    -1.5517
##     5.0502
##     4.6211
##     7.3568
##     6.9463
##     0.2715
## 
## Dimension = DIM2:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)             1.22760 0.132 Inf    0.9694
##  (tmle spoken - tmle written)                  -0.69081 0.144 Inf   -0.9729
##  (tmle spoken - traditional written)           -1.82190 0.164 Inf   -2.1442
##  (traditional spoken - tmle written)           -1.91841 0.129 Inf   -2.1718
##  (traditional spoken - traditional written)    -3.04950 0.152 Inf   -3.3469
##  (tmle written - traditional written)          -1.13108 0.119 Inf   -1.3637
##  asymp.UCL
##     1.4858
##    -0.4087
##    -1.4996
##    -1.6651
##    -2.7521
##    -0.8984
## 
## Dimension = DIM3:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)            -0.14728 0.132 Inf   -0.4054
##  (tmle spoken - tmle written)                   1.00473 0.144 Inf    0.7226
##  (tmle spoken - traditional written)            1.61708 0.164 Inf    1.2948
##  (traditional spoken - tmle written)            1.15201 0.129 Inf    0.8987
##  (traditional spoken - traditional written)     1.76436 0.152 Inf    1.4670
##  (tmle written - traditional written)           0.61235 0.119 Inf    0.3797
##  asymp.UCL
##     0.1109
##     1.2869
##     1.9393
##     1.4054
##     2.0617
##     0.8450
## 
## Dimension = DIM4:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)             0.30586 0.132 Inf    0.0477
##  (tmle spoken - tmle written)                   0.66102 0.144 Inf    0.3789
##  (tmle spoken - traditional written)            0.56312 0.164 Inf    0.2409
##  (traditional spoken - tmle written)            0.35516 0.129 Inf    0.1018
##  (traditional spoken - traditional written)     0.25726 0.152 Inf   -0.0401
##  (tmle written - traditional written)          -0.09791 0.119 Inf   -0.3306
##  asymp.UCL
##     0.5640
##     0.9432
##     0.8854
##     0.6085
##     0.5546
##     0.1347
## 
## Dimension = DIM5:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)            -0.06257 0.132 Inf   -0.3207
##  (tmle spoken - tmle written)                   0.23392 0.144 Inf   -0.0482
##  (tmle spoken - traditional written)           -0.06935 0.164 Inf   -0.3916
##  (traditional spoken - tmle written)            0.29649 0.129 Inf    0.0431
##  (traditional spoken - traditional written)    -0.00678 0.152 Inf   -0.3042
##  (tmle written - traditional written)          -0.30327 0.119 Inf   -0.5359
##  asymp.UCL
##     0.1956
##     0.5161
##     0.2529
##     0.5498
##     0.2906
##    -0.0706
## 
## Dimension = DIM6:
##  contrast                                   effect.size    SE  df asymp.LCL
##  (tmle spoken - traditional spoken)             0.35612 0.132 Inf    0.0980
##  (tmle spoken - tmle written)                   0.46910 0.144 Inf    0.1870
##  (tmle spoken - traditional written)            0.20687 0.164 Inf   -0.1154
##  (traditional spoken - tmle written)            0.11298 0.129 Inf   -0.1404
##  (traditional spoken - traditional written)    -0.14925 0.152 Inf   -0.4466
##  (tmle written - traditional written)          -0.26223 0.119 Inf   -0.4949
##  asymp.UCL
##     0.6143
##     0.7512
##     0.5291
##     0.3663
##     0.1481
##    -0.0296
## 
## sigma used for effect sizes: 4.064 
## Degrees-of-freedom method: inherited from asymptotic when re-gridding 
## Confidence level used: 0.95

3.4.7 Plot for Mode comparison

# Interaction plot of the estimated marginal means: learning_environment
# across mode, faceted by Dimension, with Tukey-adjusted confidence
# intervals. (Use TRUE, not T: T is a reassignable binding.)
emmip(mode_1.a1, learning_environment ~ mode | Dimension,
      CIs = TRUE, adjust = "tukey") +
  theme_bw() +
  facet_wrap(~Dimension, ncol = 2) +
  theme(legend.position = "bottom")